[Binary artifact: tar archive `var/home/core/zuul-output/` (owner: core) containing `logs/kubelet.log.gz`, a gzip-compressed kubelet log. The compressed data is not recoverable as text.]
I+=YC<9]]]^]7~?r9AAA(utM+1V1rFb,MU#bҠjmP۰FGFң2&dQ8g mKWC*~D$c4J-!D0Px!`dui 3"!Í~>^]lAѽytv0:u]_8|k{lX%vo=07>xHxWk|yXn"0w{͚]F(L ChL*;P̕*ֻ'lH2f Qh^X:lӥH7Q?.g*U>UgYQ͟Պ1Pdn^ +|zGz52 5|׿?[` @ <4^sbpaΔj%`HD`2Q9(-L$ë@QKc[sٚeJ u") *("QXR gLܫd3q&ap^^I[֜xq=5;_mg}x}„?#׫|6Ta笛evԛUxW9G 6p;E[խup+=u_bo,㍧)GnF_rg1v)‘OlF6F"[M&/JF(϶&=OSzEG =q|=xV$vZP rL_B)A9K=KͱT$}fVލ~XB?Z ,P:;Mf$N^jīo=͌}&80:+ SJY4E@e9&e"Bp7DݍpHWy@fwgqugZuF˷M _7/IYSs$gmF}|v3֎'|`#s^K?mmv&GvVGSU0tl¿^?j  7yJīuқ|ur:1isyl"X /SE4AɝǣiȴFIzlF-^8imeƝ3w^ k)}nE/K-g5Ot89zd;˗IZ1"vؼ.j^36/翲L{qcI=iYk`_06ÕT]m4X^,Ӗ3lŬ|;x~ꞂקI pqysB7,Ҧn[5&MۼMFM6Չ5\뉵|);mEU~xvm#rzusWϔv7J "$]pg&'/rz-u@h;&Nj aVMMK:=zuWKi g1@ZNv`'ު|xs5\,2g[ ß^~ѢXcv J1mkq/)>LIyuOy;Pid9@oYuF2):K+KLoX) W$- QP7_˗ռ=\u[J՟okT"j]*yKBJ`*r&Ŋ,m?V 1p#m75OçŋqK]*=r yk98TL;~J8& ")xz(N;(fUH[C^ҚY{HZ9ympIı3$5$6 %ZF%: `j`RQZ&e9sJ ŁN> #˫M ~A 16o&VɫVINqॕ%\o/O# ;ȹSoG3N9%@cФ Brg1z9N!Rh-+e1$u6{P]:jK9 ʭ{ `0x'clg!sqPEj^WX' 7M)B;)| 7/Vu?صIِ ;N%EY 68 l%Y]o[7W a H&MhD,]~<ؖJt 89"xBj0 ^wռ.m5Ŵ} ⲭɬI{:[ [ٍbe7ˌ6pT`Oh8OYz+{e^\몱J^';>Cnb(5Ye1mF9ȿѨF/]ca Ȏ_*:}O/OO=yq]>鋧|30I$LO;U5ْ]?е57Aגl7W~odo faanqy~0~ן|y9[N~JbbDWVW.+ҟ4aDשb\F8E2LzdoP:ᱴ_-ߘ|pCOatj_ײ֎Rj(ɫSp~8wD=rvsOWyo_'I0 x$1+#1ˠ-{r~&οEҟ;@H(`<%AjJXỡK/zʓ :x>\?Eݑ=n1X^18pL:"GR8P C %:P{T'jva符aj_]~um->'keVc@q8sv6^bsj GK~VGydzZ%5"VD۬ZsD 18x ׿ ]@F%:i:a&"@ $MʳXh1l0W2swlN´Y@zѬ|; {P j{2pz0JZ*7PKL9MO\r&vRL\7-Ke泧ޛkr=FHsF\ڨ9l-A\yʟB~5Rm&k.7{/;pD"LԪ8K>-w}a! ]օ[n)=2&>,Wf잀?>GE(A3&0($O0 M1Po%hʙ  zǥs"IRzIPDRBHZcLH{96f@5r n?{ V\q@ m}b0萭ˁ-D().еb] qѺy=nP;WyNoߜU:1@eG (,6QVE?׹pst~͡Kie )O8t$*+)S&_ S3Մh1r]S8%3xC-@rCG %ՉKYJ{*{ݝ*{@{_ >#q[kcdbx$ G(4H5 C Pr *-]TddJ K+ 4($9skML,Y/PJԵctK_at~/7 ɖ >p:,W\U 1-/ݵ'Ҭ|ӭM-TZ*9ks۬$I9I9\(hId&_#Zhͽ#lUͨN1&I>(&N&'YϢhRNC.Q#%TR5c5rkzJ5]X3Յ.d f"{+pU\$LՓ߽к`.25v`ԁZ%=7COdyYʨɥ[ZQNC\duYmr^Tur9O&&,RDu<ݏ:j^c0.qǶZW]/5[9>x!8nuTD!p0bTٵsWWf>K*0BFϐ & 3A8QjUևYKP?9ϊq_4b5V#Q4A# ɵh9W 8KGTDHgf4J`l HWR`)}0(90f}ʐ/8VֈY߽\jzq2\O{ոd[h*EuЋx'lwneaY8{&qBu2\h$@C/MmZܱ>\mys*,8r2b#7Zccwv'hy2OP fes8'56;+pT}xԕ&ʯ<<?y&/.NE}gNkaF(o~̷ .ư~w;Qk_jqvR)EL^!wyherA;:}3=AvCCD|v^w;ÌŴ|3Wg /+}j6&:׎/ĕ5}2]hWPH_]Q7i@cztjZn8I:oxs^ mi9Ϊֺ\֥]҉DeaI$7hK\q'9r{Rdpپ缓ş:_["mNo]@.-A5$hI50d0.M e&j,nSz a5l_kbED? 
3x 7;TVvh~5+s_g{_ka|6iQjn3>F0}2MSz[M:,EsNr6856Y;r<^-۾$ Oiƣij!̾#[J[bJ))Z—@.Zh  6\}6[\i?-L-`Rz3n[I&ZbYfя/xd;خ1+7'9.'W=&+E*Ns3PUY;47sm2فۿPǃ%n $c%_0F1.+k+n!Uvkd(Qׇr5.o\a|3l'o^Q6ifHZATAЁND=D JOVQK#>'*DPQtnZ!CʣRTZ$ qj|xԖ eV1'8D/W#gClw!WyNvFa3؃ufo; u]3oZկA1 cRDK%o$Y Rf:D~U b=c*t.+3&'IP0&:(T)ډY/Y_󑴪Rg+_@軲$n)q!]ӤK᢫Gh}c6JvѩbQ&'\+!Z'gE"$buxoյ {Q]U/ߧQDe*ETuLlS+vÅh}Jy*cFJB&6 O$ʁ+"r:|Ps0"Ts[M^{h-+`%2ٴG Sb0 ]jl :t_<̙;DvKh_ VɄ1hYZ^jFV9ǻtm*=O7}1+Q\F>NE6QʨMZgN#I(my(ܱV, NH >Y/Q?Q2 D\ ZQƕ:R2%Ikƙ"#5" Є%,3%\4d2ZxY^YΪ ¨Ww1*>@)(%.F+xb2zN & Oi#ϻOkYm\Z wMM[?ۛ?GwTnj\YFx琟qcӻG딅Qle, YKZ3[Z;h7׮$zezH<"OF]mtB] \e^sZ.$%'R(U5&A<"`%TIn<kZ) YF3i;c6!iYof~^m\ysAYՄrA=-ez󲵗඄LTJ)+ 굳tp%r9h"G}E2pCcrJьG.̷đ K\Εct1*!s1xA༷THY"!X(*%,U,Rl:-9Ϋ!zo2J?tsƘ=Hx4Y^7py7O.~Я3l̏]O]u^pv{6X;W.mν~43Uf l]asjzɁNm]ͺClYajNݷz^5>\YiZs16xߞz64h qϾzy*Ugb%i.^Ѵnu6/vQO]*͗H9u5ͭ3%)r +nk|WݕX@S'!Qg*f^!'Q:M+)'}Erh0xnoHLzkP2(wky)yAEm?aj'TVBiQIeKu%&CL$S @e0o0Az.GuEXI|>g/t= 1/WM22}H)G ZY2@}F+v!d9ܦEJvY$BKr G03GTjzGaQȌΘ=UT0P-W1@CzYjcb.nuwϜJ]`PPK7 }]SgyԼUO.0x5uVo z ]ȟѠ[0Aۍ$3F05m3OC+j/rq>] Z`:iR@umV;&7u$ٮ3b~SŌ:_[ݚn%}HGNQ5s)y5qL78)]L&xtx pV*'kDi51lS>_ԯP_ ɩ:zV) Z9ox>_Y(H/\$T7tsʯga&}.y s}lof on:oS,(UJ@eg&Ofd2ݷ Yth)k{X~$J9Xs|[_?xVAݝRqJ'/Pk/=>ϔEN<~ YYbrbn4s)tT>+<]SE_HILR-.\@T$C18&aɑ }=ݬVߍnV[/ݬv`u|4׳0E}p2 l>u dTDPuE# C]X@W+}`^F͘ ˒d<YRM!2S$؈8H9"Ѳ:P.lXUI$%P`y/(T:tDՆ r={&U³}ة|_ zȿ >:J3WG3!> 3uE%dxTD*W DSRP׉: F')O:(RIVRXLD::c5!Z(AʢNQ 1Pcr!BD Dy %,wVz=SQg鴴'C99W8&jǧ\#9%Zq'<AzG!qQ9*"N*KȹE5 -^T!$ Q&gIO2 !dW*n jj^51PQM0:Ou5FXn|_TɑISPjt|_7]ϲoH:<.w{8MݐϥlQܢ$_gDBFBD :ZX*5j;bbkB鐳LH2=rN ,LJqGDM TiXl:%c9RLce!++ MG$r_ddܐeoNf|عjr?`Ҽ Fw+VyY kąDb\F)1ρj@f)&oLhqlfcnx Ys^fɕ%9t5ML(J_uaGðb.FJm^XjNjwvoy1,R s# k]4ft!3sWV}K*PCFdH+m#ItIyD^6v;mLaL$>7HMJ<-XdwF|E{,2%:1&BB?cP-y\6A;+cшǦZֈP4bшqb2ҼQ/ljm;) 囟јe*v,Uȍmԛ~4v5[ockύl1Rn/Omt ΌQAj[P[IIapn{7uJVlWupuZ JZp810$J\$*Qd##kLH.D07L,hdhk1-'Z#gźFc1i{o>\es!+?h  3V 9lse1$XQKՄ9-KK4\K,@xrG v'=aw^i0oXB2=)ѡ>xSݪ>VbN6ipRRmmĊ8h<ٔ X0 Ѡ׸.H+.[VZ`u& :lE&Q7 ;>v'k?$S2:jdIm:.<1fJDVVZݪg 2ץ̛QaɅq}-|мl\F>>bwc®0V&{3y?'/$}a\M&V*k4(ID}fesPR5_=Oa]6s`mK|crF1/-fe~|vTh#a dcSQFXp\ bgա >^INvG^%V|ȡ-!́B. C||nkHi.xm%5DA ^hꈫrgxr6eyy=[nR9\Ё{Wy(vN{Bhn'i4sGBn^:+ P"7HXvcEƎHq49x~[ι<ɠ~>#!P{_3u޻Zkk!4 މC^D9'gu[=tgʾ"?]i-hRX; !AzؿNmi D޹:j$F ̊hXQ:e}`ᖕkM䔾Be9ăBAZ59r  4qj]t1Ax5GU:>]sr.#=]jڿ{ס^.A-T/cu.笪ߍ9wީYy}.Aɴ(Kz;Opz떁~|;DGHw)-GGZi|Hׯ]^ᗿ/(+k"XL} _u.:x:QfJk|VGmIO)51ig~NRt\FOLr1 | 33_/|kId"u qfkIqIшPmQΪ&jodwVA:0 }/-tJS ܰPee`&pBɡ?睥O>yԉb)iF]#;`V@]U,W!"[";>:%tŁBK[;Odi3%Y+[%ym "RB4af]`f]\) ͺu=* U* 4[7ks2*k+FPu*StQWP]q"'2Jv**S鱫L1E]=CuuB 6Zb2TrV3TWRqR @NF]5SQWZyL̨ݕSJ#QW\&NE]ej>vu >GuМT*ɨL.SQWZyL"E]=Gu$Wg/fWҌYn*j/3M  V3/To{S G\kܜ=y+ʛ{̙Χ5LܷWy;n4^ l7ޗB_(v2ȨY|zݸO'Pfï~e,) Y5?UW77۸4~T⚱~aLO('s敩GTD~:J0g^klȡ2azryupdr u%xњm}F:G M, F8~H/{?M؄YGcNէ?6L6Ubz8=<1W{WFEe;ALh9c1;]vQE?{yg01lHLN]n HDVrw;y#>j%V|*6θzrݿY+h>뙮 +@ p¦㋿ \)tfXWZdzHsٯqf6#lemNڒubl$֏>;g4&nd4eCVe+^g#whY-&0*Et2]L)Et1]"eH.R&"eH.RY+EtCC5Hf (eFQneFQ\-\*J ("+UEieZDQEieZDQEi0 u/Z\NaB{Qrt=#QrgIq0AmQ~ ]qB&(ay Yj𧓞80% E=%afh/IO~FՒ'2Zp#2#41KSjwLV8.e 빱1$SIIψO9jSq\k<k# ̸v@|zyHhs\~z_1fo/f&\[lKvH Ԥug%ݞ:~b xY֮Y,J}g}ї- Q+ɯእ0ǃ@$SBe80(3Y`j#Bmm"fbeӥ^^[PAyi/uF;AZR0IC21)(Qs#SM.:Nj5ɉs$Wڟașwִڧ0 a$>\ Ah  5ӌ:Y50&J։'k HD9>6O"وgD-BJG[Fh{0T{.(\p@qk%j>qݢ?~V FͥI9E !1"JC J)?oWhCԴy*!K-N5rI!U6k5s+&@޹ DP:@ ƷY,JJxc蠌ܕΥmâAxQ E2.5UR/IV̒db۔Lc`b$Q&%L! 
d rhl %SZ;Ximv5 97(/5)z١p9I!4 Ⱥv茜|/x:scٛ寯5ϸ_zEYf)Wϗ l2J+}}Z.aN9@9)W\)d :DyBe&+_=zn*=RcmKReT 2=ZwezJt_+,#R=WŷFW]I`+iՕS,TȤiwzJԬ^GCaQ7ʤ2I-u?NB c6%H,s\!"Rb6sR9dF3rZi.30 Uɰ閂uDa\*ج,W7cvOS}דygc<(=\%r'i :TzfGD\THj]Qzuh{ZlO6\ͺCjYQj[wkyiҢ策Clot=g36{v ^鞎jH?kk;[s5UӶ Z6/y|rK*o2lkn)LY߽n>oq}?NeJ!9z 'ԿRMr_$4NF{[& AsMnAemƈڡ@fEmtHx,5X ՓWvaǽ]<~_ӷRf/9 W/罍g !ffPgyYSΞ IT9 Kge/^^0|=4Rsy۝L\lf6 'C@e: ܪ, ƨl,Xi('_{qSGZG`BB-(nܺt5to=/z=mwz9;yC4Q*/.9zGkf tr>(^$qT")pLf>[lePQWGp2fŃN-х<\Q'ǻ6Q+)Җ H*]uEHq@R񄺃br"[5:1N G~y^8]EupRHhe"u4 Bp 0\+`X44<8e#j^'B@',//wg{1Ÿou=w ^Vx[R;GR1L';>= l>> ;INuZoEVmW«^"0%1_E.槂VO"HPyWyBEXflNw0+كt"<*wW]8crʼhֳae[ j ^>TMP*D3de3#jəB݆xɪE,-yT^RIho|'yE1I.P@w٣F-gB+:Zig)6\=n!v$,w.kx-7zK:|]6X&*r^"Z$CڋBFhh A|k.gnu)MDAVˌ:al'2;HtsK! Xe_qzCğiQY[m yE(\H=׉IkU@> 2+o*v=~* 㐡"TGk$f0FNpd19X$2Ss0-#=Ҿwx^h4gyi gŲ1^*|n49 b|g fǍf=J2̻~PaQ\X$SbL;Rq θdK\f4Vpq2]zsx>ҾqTvdӨMse+Gی2dYL򠈌c~uǯTISC{⿋Ά/Ni;Dѧ}߿?};:qB+0OXJ6L/O"@'pgCVCKZ e^.xqMSn.8m@R~|w GwCr', :q'Ep6e5U?ܦJ(Y <qcH36/2XYp$O l_6^+8<଼" W+ W`wSzkVAvvKJFysRFCf?M&'/[㉱,d$P %1K\r(a2{e6^{iJ>W5y֑hI#gzO !J,BH B&I``=/:Hc"n9=<9a߃\C֤=z_+Ɍas_8- 2w.' U8HREp26T.vJ)u e*E`9"KIEL#_;#g?N$ ]ݐ_BpR#}ϬtO3?׽O Ҷ8=|%)4ݻ/kh/C\.F'Cpy8FY)R<{ TUN) rQ׉: FJG/2#xdR4 +<Wlr( Ze*XӦ)c.8SAq"aT"\!a:FSd ־cBOGo?>+e";FFNZPb:@1KdH$pIL)&Ŭ݋1$yWu}6bJ%#M-e;fϨN]M ~1L+\\BgISPj|z]7]/n ~iODv7kSi >+j;-FexG@ppm#dudQ$Z;$\VY+0-f1@b\IXrZؘ}Rt9+)&MTג3rKJgXY(M$r__dܐeoNf߹jv=hּFϣ+VyYk,& Hj"H y.+3)ZPuX:-CQ "6hT0F8,Oʛ)3 /dTCڝJmٱԖκhx52x)eJ .*Ǽ ?{Ʊ@OɮGEu|cfMF_%Fe }‹ġFP"`[4{jjyO"x/BH na7px`b^A9AQNRm2G8Aӱ>쌜a I1F?ՈczkMOTZ"v2%sF %QPo KhⱷGᤰbd**WJLd$&=cu4(& lJu49kĻ[mA/NEKՋc(zō2hyn U˨(c4xTNKO"pR6F^܇^<]qǶdw?{Pa+ gd+쵃 ~ĩeE" ~|GJX4ߨ~ḢJ)7-UUx '߯OJ4ɏfmH-'\'w!t(;?A~JY%3OۍZŭrwR,tf eQ,%.0Vș烾;9/ O^UIA $3SDNBc˃weyU-IUJ-<:~]1Y9y݋P ꍪfn'i|ٛee2K S0?N3v2'f28HJ'JpyzIeF!L{)c.igՂ2A^(3|nF 8Myۦ~W6;mPG)yn5aS590Ɉ$n_F,rH# K냉kʈ >b H#2&"q 3rZmOq96>R6޾Vj ܴjl|ZcY;~zh^uUryu0-i)ග|AxƐ+VΫ3ٛX]wo)7eszh~_y*gP9ͩsLr2$1='T#FEJ<^|{ }; ˾ngS3viup&1 eXxdaU?0t6pӳcJ>sc̱|6Ɠk)R\s|FgzxGUBke,:_'<§>{VJS ~CYꚚ*^ nnURXxPםp~}%<@rFϵ,wJÃ&6horpw:O09sH[ SY 2vj~Ey<,IQN)93ZT#O? z Xh-vj_ęiR7ߗdϷ!;DGylb J3" " W5_R̼b*b*C:rD^㝖?8+=g<)?\j>_R>v׵^vRa΋ڿ:LRY;^SKQD,Ϋ00mYq$:M]GXJ1"ZKwa|"؎=oSM;[VVU.ť2{31$ex-c0fURy]wQOᴅQMMymYOiMP-عkM]v y0]ԏ~;*,ފγ}3>W=emf^ov] ZGK!zƌƁZ 1b"sL#,8ZI}c=nSel rJr,R7E,㳻0f0.H܁Cwe߆E.t|~d֗{j45@%^Sqk ~*i{;37=Pܪ [sE {>&'t=_ ݼvKu%4;vO8vSq|bql{S#s?~†3*`d<9#!;D5:{ئ`Z=6mEKMgYSTjt $`DIB(TrIgp pSQ){ӍOfW=<_cG|,1P")"a9rߝ`"k: _K5K= }R ;风ZuE #!:r^;"^ i m. d =Fԓ=}:ŧ=K]($ՠS*KdX ܫb;iYX[W7 _dfe }sx/4@5*E"$ի!h%JEj7{ \2{a-Cnw 4C[܃ o/P?p2Vo[܈łEfO'<%]T]nug%oAqR=aw?ݛV5Ln5CM3mQ4͸̸Qr9yA$aH\ `KsҿAo8_g)K&b|6]l@ $*=^6+Nyx-nnRk X*X6:6;~qߧ3۞Ek^JӺK@?q[Q:K`xB/|o>:Kd .[I60d8B܏` |1޹OʒEuM: ,ji?,8u5\Ea%Q(}MT0XȆގtGe=O܈6PW-(`ӮY +Ƥ|VكMH(+n =x2Oa#{יøT%ľJOp~78!f̉OI%&tS ˻IT=ZFΗ4~pfoC_F(J~d OS b@#ޠ ;5iϧ(0yuW  ΊN[lBl::rBB.#ca8x9M.HvY&#_II&[m1e3~LUx6ozmHY'oj U<{GgIG^,x^/??w->Wj pg,4 ^-kB9qMd*\Z_W^^ΎLkuU}`$ƣOg}j+&M##~Q1l"J5r4x(8-7zz>pv⌍GUQ4ꦵJJQJK[&Or}tͯAS9Ͻ3g5Z_P4ÆA19Eћ_}חo^yox7ͯ_'ށ|Se >?^CVCxˡu0ꖫ^lmu-a'HWfyu oOx9\Ζƽ/J:1.{k>"yNyϲY)w#^[uLWrd36]A8o- כTG.iIWZiݸp$/_@|lBoRLRX d4 "Z!h}TP?_㓿ǭIEpTIl21hJKbV|c,#(/w=GȞ'`yR6Ȋrr** PdQJgk U7")J!Gqp@q@UU3&JΉWm7~G{7F^Yt ]L[7Zm(L"ª<r]*w$ AC##oܟ,>I,y4YVO)RAeHllϪT XXو)ΰDIF.UشigtyLyGW @L~k!pz/87OwvчwNIug2HbVtE&> :lj:=ʝ{;ϐ2bTTѓ&Ðʲ& &u488#ڢNODDHAF"LFKcH"/RPD &rQg~#־)oEq7BH^kDUBN͒ 6iM sS 9)>%s@l }bfJC`M-YLދ3j̨Է& K?&g]klYbŜ6Mzoxw}=rY(97כ.I{CϯM=ʷhk@ 6S%yeimȌkLumb|A,)`9+`R\Kx,1)PO)-kJ,ZKfӹY2*da3VpzFY}'Vfo( :^9jz6ͳ͟JAݯΪQVd2 )V=VJ:KEdf +Zi[]] oCU6O*6ٙLM]E_ŢL$]TD tni< *HfԱ֍>H3ppF&R2K DGTLIC[yXǷҰP֢&ϲIdH8RM0ʬTh,Mfy+˞U0DlFJDX"D}ZUтY<xia,$ء2.NV9Yܑ^-JJvh,W__oa_{Xhj⿦ءW/,ʼX~!?u4ր߂1q }! 
FUR6KZbBh(К3 [zISۑocU@TRaMvOK׳ 7jb588:CV9o} =on +?Q9[Q4g*ILŘ:KC*fK=iФI>njeQ"ָu01(3JQ(aeL"g J6J7-ʍ,b N|#o).+ŸCbz 1%_5w2՘T蕀x{l )s SPO!R426IU@ 1;jsg5j`$ rm7PP#*8!Y>-s[վCΙ\-Afu35%p:GOUu%Mhފo8mzA[_As(2֯M7sC ze~<u cO O/m{O~O^[1~n'ϵvm7w9mڜi| 3 e=,/=y>$>°lүb"e#_[ǡ[/6;_oj[1ˆ;_Oohß/g|/XƫLX.O oOëֻ;=!0X |a3M7 [S"-ޚ.RT!,khk{7Wog,z7m(Su~te%/*xܸJ\]rn`p^(=h եl݇ɧS(E֢ ….iOyJ{Jk!y畵OgtImcal(EfHyQQ 뽈Z!b.HN ei`8#PXޯy uo~!S= jHLL!S.QjT2^`krۄyNXMPQ-WOQ 3u/K|{oH:;م|gĂ#Ϋ\|0}SBūNDdDIXC#8~Biq5y"v" @I[ú1,C)r@.X4!;Ph/ E6O&I|~ݢARvrvoNgG{nUʳܟ(%XVq"bq[YB3&U"AFrBFfDiyѱ2*]s@fKbUB!` Jە5g}|<geSW_捽ff00zc$iU*(:+@4Y$Q@{B2! B } CxngͦsK>7f=^G|y_k09@ @F.*k1bt3h .Z&eCZX% |V!o!uaAKԌJf-0V"$A'~r^dj5Zyv|Гz=8WۺЅHJ5Jl)NW{!YX(V:Sƈ"\>FcJӓ}ތ&~Œs*]7# rB{+1WGNLk>5?{WǍ,ʄT{Yaut8)3LZRZ׿pD%6)R-qZSApI,4bwcD>^fr+"Mc#/P&b!|/?@2cTey\+{MI뱚sF;ceC\1V{N2=7bl*<ٱX=---cɻ 6 ܺ ˸\L־|đ7m}) "Niʎ-F\ ~^vB\c)Z@DNJUH彸1[Zأ!"dz_V}h_4tثw}x@/|qv^Z >vL>E=:~sA/ۛ{u Ǯ&"蠑j>Z Zـ˷K <*ߓQgg+;9p?يz&ؑ]сW?В;J#<0 VDW5\k֩C2]#]Y+ح:כ@PW+5jEt5G\WZ|}t5P:BނB_Xw<l~[ۮ]jg&eO,(M~vsyy4~6:>ȑF۟vGkVD.y)5CRh8idn޾9=*|s#$ߞQY86 \~{nC\+?"xL>)֤4՟e/o_Nw{ 4^2@g^ϿjQ8U,[+JΧd{y:+ʟGj瀫y-s5*3PQɟ1DW1eCWO+t(JsUȡ)ho1f_{mq:]^7{W{;6/w>ݶwWSm|UUNDKB(4Y &mƘ]^wo@Qg86G+[slp$"޴ TdrdGjoJNg I1|1!>.C{^^!wihNk\NRQwW#гn*dTʎV'eFG3{5hrʬC#Uc]S!R*6c)jJ%3znUsT';:[Cmqs'Wjeߺ1&Xq4dRgmՂ6'FYV&[o-5A<.(nD FM4F5S)R6q ,ZtmիTsԻfCwVlmӜR=FI!SmKAb2c1N#1-.o({YE{))wO@Dk3G6]̭4YWR:Wvڰ8*bC&S$s*02{G>w!0ИUR=^b 5lQ~B#MYnj(`ɘu!g5¿>}jŏYz̪j35ԺVTC*)IW΁zyvY^:aI>\Fh\.`5 j*dDH,8d"\mCԨ*ς|NӢyP)XU.C6ƅ|=u50sS"0f;Dr΂Vi׆R XE`(.t-c64ˤcw552PU'z$ec!sVBE(kGU((NUj;o9dJ R/ [l鶒 .f2R c  ʄ6Je:. v)@@ZVSJ+՘;.͐jPoB+"X7(c)(lP/ ғ,ED&T Y1bTy>MFݚ ;y>:8o7Fe츌 BztqtJN=)*-zCq ;-,F ݚZSP'ՓFCoׄ bL%Ѱ'c &% xKd]ٚb樇 tyB1C[u6\ZtR"˕,;d*a: e 3޻PAz{ZT}fQ ƮXB_{߁y 8͵cQ'7 l!ǘ"/|AW F-8˜Q sc:j#pg=uK[sO:ʦT8FeМm3Q͊xiQc- bӪ %_6m<3i^'64T lFB.dP?)'{֑F_v *Mf,VkA\cUw&*އY[Tڠ@Z{pk7?oyq>Vo׷ ~.ի}r'@,;d\@כP˪ÿ( qK =䍣"#+h?t(Y ]!]Y7] [V@NWetBWGHWJtW7F]}1O(:B‹&mH_`1h)p@]E6jd/tut=fgۃ8€ys o<څ46#fO6?= >nI o=th7?ٻFU"@lC}X f Ӣ&M%Z@2Fϴ,sJÃN[/JhOiİ:~bW̦-%]O-1g.K Pv`d*qKHWCHRգX0\=g=` \=[Bp8.Q~\z,S 挞 \%v;Jj|p\ L:zpE\%>Jt08nSG WB7WiJO \%v99J ~p7WL1 UbXN*pXtpካ UbXĮWۧ.8Kowd*N&n%Wλzp%*GgdXgX:i@R`7yD5~k L'v =Nң**$L#,1\|>rIu^InNvJVg2']Cp2BÉEJ3F İ<9㟢OgQ!7h?+`q \%nges_ \134@< ;05|n gwፙEx'9z`G, +à AjӾ?ûϦI@j̛A nzn oP#spt>NWޔ>k_9%~~9۷s[@Lx_6xߥDXQ#AcO#DhNg>}Vac~c ^ "O%-7Wt +| .bTzI] RJڴ؉tQ+H`{hӓ4Qvz={!,=9kW m MD2Km,leKH,հP4-XAoG X_1}ZR1 @+̯FyŒd~ƍ#9//$Ab|]7k,wGqZ|=/M?Y}HnCj *oO%([&s}.m&# ̴ _{w?W_`|gǰxYdWlq#Eq)laC%e..gq:gU1X͉b%ey`VuBTu8&S| D-kEMc8 ̉fu/'#6^I 3# g IEf@Ċ +JKN{+.&LFzϸeuO밼byӴr>C=~xG-sR\Gj4fl?w=<75) *ؽnIGl8Tc."Fa V3=* K'yw+@ɒ~,"iJJpp^[BJAY&D4 |Yq63޾0;I(ҸұK6v,1.-JsVA$pbՅȷH< X7Et/t7//]e`S2?7aʍGmxI}W)ӭI-Lp=dYڦ,gKǚ, ##9Jg$BSC:mMs(X4,BhpgôA-='Jzx&9H9PXz?GZ3P&AX1iDF4QAJpDRQ @Ѵe=kЫ?^S=[n?.a&#=u4bv{Td1i:}Tzf t*Ц ܼz~aeR)[QG";,b "iE)$ 'pjbV#]w42Wqzͮ(ww1[H {R"rQAuQ(N9ޮ~d0ЋcQDd_-+o=3G<S6Z `US%Fԣ=X:cξv&!jjY;[L^/auf_Kz9gK=r><=U~ yƌGâWԿIϘߧ\TFOX|/aԃRNû2,s2J2BE bR8<k`2+X:2)qaa|D5#;qlرaǺ57;f_+Eգܳu_*e^;VԂʭCNr?D'8.8&O'F2ٮk5*CzHm^ wE%KUe׮5vv%q+Dʫ%[[nYZn?ӏF cǷ@lC_ЃTbSxM빫 >݊}xKIz.0ϠUX,t mt|]r4wBޑ'v=Mphc;hsjQcTU܈eާQ~ѻSt"'_@Uڔ&TK-+O}K3 RiQো0.YO`|(rH^ g<6 }96?O.'O I_[bk\ett'7[Qk^]7>[s$H[0ƶ^RZ5lӺ2$+xQw f/onEIdH{1yf E u,7u\]---:,Gdr*8WJ Ɓr&= Y\1X$g -#z!UxfXYL^jʈhA #(/Hksts1eW=G7]tX[ng՜u~Ql޳o- i@S#*N$[u$X8WXቲԾU vsx5Z`(3*h2:ZrD`qPP^ϧ[ss E/dů7mpF̘]"u#6ʹh$2͔r4yhGΧS{#/baD%4Na ͝w\9␈FGj맨.(Ȟ҆lB`8v۽娨nze5}c{|H0E:K%JbԠc,F;DxDH||gtvA '(1HqaǺط])O)1(-G2e"1*0`"{&CDSDci#`NYc0g;Ja[a$1!>"gSyiHoɻ2DnCJhp>}3G)]Y`K&73|>ϟ|fƧׂJמ8I"QB]`l0U!Rvg:/|=CWl"@1H@˨m*~2F(#쐧Iq$wp _9N^)%L ;OObkm#G_E?sIC. 
ng0a >c]dɑ$~+[_-2+-wj+:Ǎ ի"4xd)%m8^Hjc}/=zZp`.j[vCw7 ͉j(,CW j٨ gS-IdvVr:y]s&ho^_oSM#Nλ\Ԃ$NRoǼ4>`#4QD^MnVM9@ 0PQQQB y;x%B?>҃_&T26bm|@f/B'ߠf\n҇ClջZ/i=vd6Հ%E $v4kH(fDIaW˵fuOЍ"-[}-m7Z\+9v( έykIANNKͻpLwKD%!A]eԮk٪ϵUvܸUWJ1 k~Ug]'e:٬<Ӌ\s ϩ=2~~7H(q-lגco/BQ)D>:ZJ ,@gqzxfM׌te k0;/@6r0!=8?% ];ӳgq1YnhWYB"YNGa'qqu>*8[ qX^嚕'6{N&A87,yRoFFH:|$6}&z#k{Y^Lq[1r[R6Ng9qlG@/Q9vrNLunnӕ?4GuȳN38w>FpYd]W>k]J.aC*[ُ2-`p-y_~"i^F2h?_~%EGH'6/ְ6|yqu1QY[1\E2I]"g6 4w>6լe.5ņᚋ&;uw/f"jl@7ط`Fnqg\{/4,јBh2Ar\ 덎9 F:=i@wǔZ$7ʫwv̱x*xh}Cγr$BrA$\sfȞ^+I$vo5P&k~(́>k._jqelz9RFYIy!S*U_q kRNeq3>%JKU=EQ gE-чrY(Ephj,C1jxi5o}/,Bv6EC,G)-s;DbQgA؜n#a^՞ a'(%#22hjG#u od'6'l'IUKGz/=^}"%a:^#h5U1 TF7$p$Jb?h}4H̑; 2}j ]? 5u*8ɳ/U<v#}dǬnCćܡq^dUEf^%/r2YPuA(Em1+8Nϝ{2D&5)xe] &-|W@wQ&%gL7'^YۻXs9ߺqYϴ\iўlrki'IHE ƛ[ͳ],(rCFK^"19$%^ r]C3R+d miLqʤ G{S+JJD(QHg'<ʈ?c[qC|wBX7bmtCXaRkq R 8~ʨ=? .~_jiDiD>D!S`FԄ࢓ڡpDe~/HuiC{<6@@h $xC(Br 9)ぐjPr3L͏HnY>lo =U7$pSp.5;V0=aOOp9ooJl8nDPzחkG gyk :țSQB3r'(;S78Q đi{Zh9rVQ Fb锎QHՊp6e3xPv9aKY!ER(|W^DGGRCII`wo<0 kMs\'Npkaa+==h[g٪xq>?. n!F[<I;`Vzih0:XzˁstO\i_7ˌ&Hɴ'a2],zsx:y9.Lou}+ˬuO3B>r/M瀥IdqH`w6ǍF9F%  ; U?~yqū7zy \q#0> %<_D F}=6]t-[wI+{S|4%~1;O_Ù ߽rS<,W , \a.tXwM,T1WOCQ!1*!j1ۯߖ_={Cs0߸D#! -)ZJQ";}n4(鐜Mx: xj,SDŰ7nB5E|~P G7;ɴњm`d2[Y$&*C:uuv*:Olp͑=hg]V'ʽK^:.݂KOd4!RO^KInO%iVkj]ѲzFqߊQNF%I;3)$ӉEA.:#H0qDF@.H¡aLT&$G{O5Zks Ѥȹ'nhrHf6|֤ę5 מ/'O w"[㯥C'@]J<:T328sM؝ _xV~>zG?7 "B)N#Xã/94>\cV3.F%_Z (2N^JB,geͼB?WATώZ\|j:snѫvw;a~sP-,aDq^G.5t;4h?Z$,&jqwpVnwUG?S筏 /9eÃrbr>]奜N>Y}.ٿzjD{^"rZ氣ĭY3|ZHZG*<4p0ϛDjMqo$F2\<:P<:H<:Z&s4X"D@e%R# V2`jQ\rRHV1HL451)-HY"!(@mrր5I嘬l$W#gCµ*{f NT w:݇zjB`I{% OIXYhK(DiuI:1%mBɭ*eX% EҀ͟BDPdoJКSDpJ+ j5rXʶ4Swñ^4~rv+'d#$#r*5D3!Xfw XFA^u:I.q[{j*3JUӗ _ GTĵC-gVKkV:H%%(rQW[|o@IP(_L!U2>BNWG/Ț#4 AyTfM(8iR@Md:pˢRkbSYGamO#?)``4cȠyc<ßGLl#EΒr{"%rk6rksnʎ^h!'c5(nF6o%hJnMbEk0$.i:+V9~#jH$bmfrTJs63ڶ6aM`昩s`QqXnO5` Z:V!;^G|gIW@f# G20zd#LȚ7h=uF1qJEG=@lN\NJ2fMhCl@uF)li))\:@^'mmT%5@bk!OMqH۵Q!gƯg\]|]\Lx:,յՇ`FSsAZ&؈Ԑ\IȥhjZF]STrjjPSSTQa/2\ \!KL-WJZzp\\iKعU&WKLWJCZzpsR/hp.β b墴Eno:?-(;!*cvοO?tiDm?VߑJ&vCDp#{q[\|wˁ;# uf|[A&r;Kߋ;Ļ`8_ea&N>qY9Oq6QN(r#3tb:YTB<_+m"YUXR\ p$DcqGn*ԞߪjFz!inaYI5oY af+SF:ZD*R!KozfU*jAILe=w_M.|4LԗejRsp/2^ \ej=wTj;+ T\ \er }pԲwWkU&\2ȹ!^ \ejO咾QJ KϤ`փw\eh Ëϳ eGaµ < ƚr/=?YBd',DcS@%8´ WYN "Q0rc"؂꟟Sm8ߧbۇ*EnQ%w ԡdԩ&çk|m/WS|GBJf0x!!ĹO 3n7y/w;'6m䔮e..!ϴ*{\BpR+e <, B=U6$,ɥ53t-ǽӔq[Xa2 d:*5dϋ%u5m68 - l!qd'%'~nBs&t!`F_;Ap(kcC^/~rc OEK)E2pC:AuT C %GM֋ %H.XpoQ 1PcryoDB:Q(KYL5b1r6I77{WK}S1J)y/ϧlc"ג6C, ]^ W [غn]LZ7zY~lb[n}W z^hWcބQCڬBbW:.>{ϓN-\23֜tr5iP,9f66ʟsV!k[H73rA#{i"{(5gix+{7No ԰hΰEK ʠyIsм!UQN'>1S򞃊PN$y=-L0σRL DLK\MAI#,݄;fEd:$Lq4yJcf\{CvCYw!t~.]/D2yI\oxĿÄo_r&*~UJ3yaJ/ TTs~4q=RXtcֹϔ~jFu(*k2=2:3o kZêӮ4OL$;%)hW$(|3\r6́n҄'ժۚ%36،^"qPƀsf³Ǫ NLQ^ړڤ 'Jڠ$S(oGɭ 84%.h@Sc6*DF@&=ow"DK2 K"u9@F h"\FE\fܐxTjUzep QN8e%#1(q#gg߀q0ƳlXxN$h ؂ݗa2mn֙weк,L/ 1z"yUkXֽ!yUwVCpĀ"i ғ(BD:fo:c.ӧJ=<箻E2)\|YmVZArGBBriDԙ$DB⫐+g: 5ArDԚ)c$pqƂΠ؋@T!2i5`I KۍaНyn]pɔCGk!4 ӏ7uk ^zz]V,K='<|g]nrs\$I ,@4T3"(ʂw2ZHzeϡH_u|EgLݸ?+\`K3%O aZNPT*M)ո#Íyh^9 ݏ%Ce?$Gƅf݈pS+f-uF(WR ZxK/4xXYݲI?9mXG&I:jAB*uRJql@aJFABIqsDUtnf7.v˂rZVeu(ēW*y JPl<՗LͯDLe#h ~86ٜʐlQBk\zuY%Ĕ`/{8;ME8WRVO0qClTq0J('j@nqQ8p޵q$ca89K$XpQ]mL Iّ_ˤ$-زNtWGggsViտ ÜURD IB Zt>:r 7 ~3x̢|8~>=}^!/zE/۫5˗ˏ:镶FZy=<ٟ{bkMSGl>hz-N,,n[~ŕ7NG^_fkZ]19b^uKDdOd{ͬw>#e4R{:承/6/./7?ԟ}?~-~ۛg`NHBܙ_w"l| [ 㭆ah[r2k\|0^}ߍYgؽ8]Xգ ǃ2/sa8#>ˑR&\F:ν z3;#AU8ܘy%s{=4;I(:jdO)2c71h B߈Rd內|pza^|>Þg xReN:bu2Y+^L5YT QuUv@3&;Z bö~.](o ]5ti :/=NRFKJZ*A8Tdgr;I2 AC5XܟPhijBK=pi(GTq C19', 9 !"f .H:eQEv6b,Fb3o($BHE!z.&mF85twECQ.I$ά>:N,!N;/E<@GVhDsyz`WEZ4u P M3)HbVtE"[) PDz<%x=IdHsT ѓ&Ðʲ& uO.i488#ڢNODD #QF#5 1$U9G_.YBAEQƨ9wuQe bݾyG_~!:gUBNM uEBjrEdd(Aam*dPIPb"[߭R{'SC{RcAmFJ秊 K?p ӳ.MNhlYxrYG-<4O g׋ 
梹̓8.GZ3R_ib %>tmyQk\0D$,$ q{Fڶes XB.Z*V=jKDr(ŀZK=%VR5c3rn׌J3]،3Յ.ܶA~Aqӊ7T[\ \5;hy7t0M>Pa5vR}愌Km,#a:o(I-J%CXҐLj:bx'";Umj3LM;@ \EH&t=zVܮq2Η\}͸c[km}{Ǜ"sƨ&Rltd:SZPrvDHD maͅJ2#Ci؋Nu6mbhA/Z>*؅"%)TCB12ZFc00:Ћ{twl7P[܃ ݢw_o#TlPEEx C#E?JnUB+vw5'E)v!f!~BgBД(tC*)Ix:+P݃UFάtHFych 3AaHQHdpI"bcd݌;BNRw2yOQr䛓Zې0;[^3VkLjٟ duA( .[SD"3h8YfQ:,m<ݎkYdI<}BbƋ7-|W@Ќq(O/'*DFي׵7Ѕp+݌9d~2 ݌sSfXp#n{(#AU`6= t<w`:2-6s2eeO]$Jr칧Mz?[}{]zwjf}Eipmkb-vPynP g>p~PnZ|auVK^Z.Ͼ:䧎?n6/$Q9s MRt^ )[SibLr >^Irvs}; h.#0FE{yT2:o61^c~4/<z<^oyg ~;-<C'^np Ł\)I#_W|jKir:ʽBG&U#|_Yg< ~'sv~/88g_^Gkߜõs uK%3z%FSzTlHsoʢXialZ \ P+om>@]T|ľfw~}BR);AX8JIR>(^D.9 Fc)D[ ITEO*YRS9Q@{B2# B }ڥ C :I?3GBL]Tm)NS BP|[MiO-ƈ"\:(FcFӓ}ތ'~m'{;ع oGw1>e ޳ʜGBBSD򚍲&/KޕqdB &>^ǙfluZ)RH{ͫuP"%6I꽯WgYɬs[o;\`ORyuqcOV1gHǮj+sQh؛ὗ*&EQZR޵q8XcE_\;f!s`0 :bܰ?#͒S8d5!lr&c՛ ixb(8;Ė7\;ȅuZWx%02vCN봭\DpAr.LT`6 l qh=N4GACِ_\ʑKY,bz=#Nc}ql#EGBnt]w~)!Z~FF'sqmL4+.eXZAiI9pV?\ [!u]z!jVWi;ovF7T_m84ljc C-,Awpi0DF]"J+ZaxPk#Ն*[U\mҔemB%5/r40!/yo;lh҄(-ߑwT>}"B "8_|[afl0,3JJsL(-GW h8;Qe[y2}SpW7s 9A0RN(dUac+̷T+Z6KJ1!1-A¦ vN2Esx<Ɍ pBiN܀pYc:ul;O1nRJڤ/ pj ]!ZNW򖮾b;V=iGKJLWeGǡPV^-AWV=$At`+HS YwЖN\5XL)th;]!J[:AJKDWUL7sw >-] ] Md +i¥)th9;]!J!Z:AZ&EWXA+DS ѪԲ+E͡+k(o ]!ZQ+DYЕTn6)JL 5mɷ"4Us&p \ݘZZwQIҴ=h:2r#ťbQfxM'dLVxy3}[7xBQ&|)nke잙W2+cҠG, zsW ɜ\@l᥷`^'ZV2+'جjv}hTff!JQKMj)` s2Y&||4H `ۜa=+o?Qj*T4HcD [֜IS7f DUϻ*vj/x?SBxl'߃xKWzj2At5獡++CWVԦ+f5M%+ o ]!ZNWRNs<uc BUS薮N$"m+,m ]!cﭷZS{bJik05{}}~pyc *UwBHWXt "~pk ]!ZUADi$ s_YDww2qS[o+Lewō9^;n šԉUs:!Mm;%N0nT $%mAA9Җ;Vj=W%qjTdG#xJVZt%ZzlSƈ +Li ]!\ɛBWVӺ4+$M+lyc RBt(n3#m]`Ec Z䲥+)AtUs մ)th;]JJJr.j]!`ۜ JBW^ "JN[:AR\'`&mc ]!ZNWm5\KW_4T7p dIҕ!./⿬1C./#VؚcW 2349a/5ݺJZosg٫fie|;17QqV"% $Egne|yTT~0ӳxvTh ɋP\^~XZL^uflԻo;sg8{2A8%]De\!S膃?RHm77)ze:ߙ! ?O>(ᷫ[׵:[ %?Ni}o;>|wW1Kf]Ƹ6ŝ7R^R qדFIJV"'h!EhԲ4'Dt!4q΁֟mPmjMS8\E`xgIg<CqD"eNyԖ 2eV1'XrHT ƹLu0ˢ| ÑGBɪ30s!/ ax&hm{씖vp~~´v~vwʆӠ7N{ڵcu,J$'K6~0u~xJMzP4i L{$ʾ\R}bPSOA~>'NZ]d1$8捊R؜ >Әz m4Y=U:2Aj 1 24bx, ͙GLIG<Ո(Yv|:KbəSB Ǖw4|GB?m.3И^?C-Re.OU+bpq;o_lbM, nil;uG˻*ܣ[ x *3P# n8D{gNJ CRxv]Coo~e~g!lv1֥Lw CV ރ7*|%S}կIU;߹:Pnx rWi q1h)e]P>xWLh_>Abbu譠Z,SQ i a),#ZI:T}jCDDD>u棲]JUvQ|_%xtZ~m+r@)&g(}|kKOΣƃv$ ePgAUkb (B֖1ǵ9hlN\z5u)ȅw^.I4GK8pl~08; 쵝%$G5ו_D24PvI`xOc>mk_tsh|ןE+Ri#"RgTpF(N dʢwl10#:\X MlB;4VVQڿTӴ#IG 3y)q59i8g6B0g%ABR*:a|0r+{y%k9xH!$WgT"8a#n*'@{^ku-SWBTA3ߚvoL[Y%Y=Ly737PRg٧OBqbvq^7? rn';7$MqRb;rº*Sn:M~x՝΀ϻih" .K*/!&W x6M?ѬmCumby`!OsJ")(oLfꥂjƥ ƺag շogKu-vtIm4iaFa;|xײŝ/泳兗0M(\+DxI|%޺`iT۶;mɰ.&S571%?A b)b+> V=ݤ98ɜX3UFOumn+CSZ:aLX0RFvigy7ǭ uhqg`*?Ÿo^|?~xH}͏/ ,h[!'CI[e==J[%͍!ie:d{} %tK1;t w~_ӀW/ôM x(v n~^ (QpfS<b{yd{lP@so.V[&צܫGwhr"==qD!WEA)OIր\Xỡzʳ :NI_ayE>Ա ̜8sPG'&=3%g3ԓpIg] * 8%)CN9̠4O4M1}ZGlZG٭D[ :owziڌ(ot#8鎸tK}Kq4pfCvp!B .h19c]] ($F39RYLX$ ΎRvNV}<6H>exy,w2P%*#q`0AFDHbSMbFVwr_jf$ e*+;++/"lFSt` 3jlfQݘh6*o;]ߞIh3:$ OB㢻o6,Z|ՎL8VN9ajq& ^Q_JRys Tq^؝ '3S2{KTFim@5&z96.BTD-c-DP2RLd.ҲBdl͒Vi& QƮPJpnEݦ74ٗZ :rzgty9vy1rOQ(eA0lzZJ 4JޢT2Z,lh鄦mCt1 K6rΣ3L^'_REMtsFOf[-gĆ=01OEj7]n, MpFCZ$ 2AĐtd!MMD. m$Nma-ݑ4l!32hٳ,Щ:'TG!٧X6[fyڇU0DlFJDX"A"q+V{йH5w*NSJuE@*xzM%b &3/CF+&oTr0첓^Ȗ2E+v-h,-gDvSI7gSu6]bh, SG[Yd&F(ZH_6Fg}Iˠ^h F\܇\<y،:v9R"",!ɡOM>5,ǣWװ̢ȝpobχ\>kRV@d3(0wPKY_U k5a*R+] +`M_jE!k(4lYj|;7Goj؛}(hzZKޗ<=o`Ր`t[Aw& D?3?yU䁬%lլ|Vwd N p!9mQzP.+6Wh|X\\r)\mClRҮA+_iwf!݄ŝ{Ár%8@#AオJl_|EjJxnA($ؑ2豋9c')Yd']١!Sɥg\J=wcH]Gܾa_v-P2 49gt@eʩhQTѩdTNAgHER*"D??Vs",#@S7#wrii9Z/񗶾tL1h%,TeQ*6Fa!b ԂKN'NAE\l&̑L(Bl_`|+wzs.Q8.F;Hw יlyKM] |x_)aZDL1yH}cz:Q$o:PHHQ zt8G0Q"IQ( "g2)+l9[7]^vobY%Pe,$!/h &L5!SJv )s !RkYM!]FuR$d`$$E.Kg}v%ic`,PvP5*+$xT,Ifht};*wS:ֵ! 
`Kh}~n"8:ItFD[{Ng# P4цk③Փ:Z'lgXKx`6O: 1C Rv)`t@uT#/EKhÚ|~ey/vqRn14>>\-*)%28f)*"3*1g!kkTG*0o埆]RGjBSHz% %'8JYdLUPVl99x)z|<eUݽ_o7,bҽmY-v`Yɲ4ƅen?g^6/#hL us?VOw?k^?V;ߌV~Wp4?oA(O2_+sBꯜAFV=rL743enWkf9-Ƕ3%l/ 0߻:e\ݿOg.OܻLf4[_,}{6LoW_v67, 34gLgRk\9w(\N~D5TAIՕdRg]uEMm-̑5݉k(( -P;^;`RB2BHB'AI#х12#TV r(DTʫl+4y-YJlm6[fSϏMY\^ Al3O(a~y\Qn*JɑȑLI@֒HF[ D[foǔ5wG['iV8: JWasP>cIY<csLAF{TYO*$%:ƹlgIE! bB (CT#B +E15[Ζ|Wf=>#T}*51;HE]jIC1VH%E֓4YZBsA>2 !A˔H3*POat%&K OˊAxej5Z8N:`]#~/;VK8]aw BZ ˹V1Fp$OoOSq3إSO;?6Co=egUmEvB{+ !)f% VEnƹCONȮ3ߢ6{E| /uz3*5$H:81Lu8=hF!rGh-:Ao&UbW'!]?Ozz Q Clܑb<bO/t6 ;k?N[ >lLb cn;g 6WC Am("J"+rm`U^efҚxB%y9۷< ~E8eMFؗ;p >] 6f>!{}.G Q9H((ۇ’Njt|6|yH}3"D)&002p~wjb}s%NU1^=^7o2/y-7`k.}lfmɆE4fQ,ĮF4>;ʕ&_+z);m EJ;#MƤ(H2t)ҚDQ?.?xWԳg{*WWGYQ?׌ˬGOD|z$k PϾ=@Yt~K!|g(`t(tl4`PDariJ%P6)97Ig\&^UǤ*+R(Զ9lQAu2@d HF2HWӽ{$-gr3Ϋ۾u͊Έ׷!X8l4Y|>ńwV8No'Ys5a~se0!\DnȵKTWp] O]g&y/V)G_Gwln즵O#;ͩuimfyyI-f~0r==Ԋ[]o ݿ64vufnx^o&Z4mTi>cyq 1(H:}`lsdlo^==𩁥.; "t! bԟ`QK=r>Y}^A{τZ:PNB{ :C)]AzH|E%)E6Bኅm :m(ztJS;־YK+ bz~ov_}U5L/z YlVJ. ,&wKl>Q.%?{Wƍʔ>$w /r%يv&ec b%~3Y$R#cΦVf0A7@9KEΌ$g/p8`TP9br4R[eL!$#9c^qBy:xj)H#, F9"#9S1;v "YvOGCkpVbSyإ3f'1 a(͈p)J/7g| _t?g&"(DU\@,h. ghN0sCc{ix&ig#ř&,b|_;!q5Mn5W asd\p݋o VMr9(o g,ɸާXǖybՅB/MkjtdA!|2fN^DA]3 ݭ:6I4My>rTjDB׺v ׺3teԮf=خNtϭi/8nUa~p Edqd4 Wk_0ڦYc]%߻qU1rZnɸ.z_Mv4)xXȟHSSaEx,yV8OϑFFsUqkn /| 2X7o5t.Kƻ\P2++᥋qMU|(w4髇'~1~ifLM{<(fO^hͿwzh1-030NBV=Soka}&Cjv,lhy~ef&>|7O3j1iv-^yBvsU=(I.g-0| z1X08Z- $,cQ%pU8<|f?]dh!.7}_qh~ZCV(a|)\J @PϪ!T[_ߣ6YxPmh(|}r»4Iz}G neXû0@UܰXXkPP?~YEy:rQoHf~1j@7?*ꙊiSd=\x.Hcq5VKiј9s箧 *:DWXU+thESBtut$Ct ]\N:c]!Z%=] ]q)43EwA+EW Ѯݫt(mOWHWBjBl KBu+DiC+)- 3֪3tpMgA@ Q2ҕR.6ݱ3#T;]!Jz:@JI:DWtgZBBWu(9!V#Z\xF*.q~P|gVŘ5Κ82V2Q9|@x_Wo^ɼN5G97`| ̀c""׻ cWy1ە37~qGx1 {]}8m[(w@@ﰔFq)'Rm$.fW〼i^sc!TM,`_AͿ^}@hGJR(S9aw6 F!y\MdWD~QJ h7Xe풛uw 팛h%#NWr-ӕz`׫c`+{k;ϽDZLۡ{6+ծ]O֦CtE"t.]+Dc QJF!Bt]+DkپTk=]]qC39+tW ʽw꭫C+aCtuwA[e;]!J{:@RM:DW8WЮ}CLOWHWrݩvlHw+K; "ZN*=]"]iCxmZR(~lVX6koQovOC~<͇aNSPΠ_C*TB !(QBQ/6YGqŽ{-cmԒ+'Z2ÜL '#>B>F£$VM${bʽfRr})Uu1x10!TQy6EA0ʺJ#9iz(Uk>Bdw跥ʜ\@l饷@ *_b{BՏGW7\_̳/FizE{=elUz=:G;Vxg/!k\u#υ-H5 tqqʫN0q<;A,j'K[ΛI"7Gye 'X?l6DigJ2-E Y&3:n1ݟ/g*Zn΀~,9NK cYI4O(,wVrWcЍ)IᦗL~j.mU[+5٫z>^,^]ge 78O[MqP4&Ip:?%:|(/LL}~u,^Y5"A2:gcсՂ@A`r1I 3fAj}rP|e:$|5kַҼ jX<}o\W#)AUL }Y̪)]7*miO凳NeL2RhJA7 (p9VaZX;ai`RhYkN< 0$)H{"wqF- Z:%b6>0"QIVgs.{g+jl VgeI{d6ھxl!q͎jlM:vC2q{^ ~|G4i?qr9.׶j kN'#z7N6?{Frbl]/au ^F?-%_߯z%-Dc핆Ɦ_UHJ1Nƅ5&%/W&} B*!`+# qNQ:z,Ns GRD$a̦t냕(ʼC)PZ0ZBaHJS@z#J L2,L."|Ⱥ3r h{,UUSee;b F2V('%K@ȹY|R$ngU:V0em|bG4ɣ'`^YZu;]wdb |De*=2BLPV⾗T*՗ze@sW0TpAg˿63sU94oyx)bSM:A\_|oN'iNJ+Sh3 n|W *wRz8ޓW]HEiws|= x?(UJɫlFm""J) +TBEhA] .LLJ;-"5'-/\ z cpUL&e;wN%{>i-K>BQw{w RêR mY||!y) ;pkpwȭ[BFQ~ʲ``Rph8rd.1/JɌVa$< 󅥏ʆ71|;9[er֐d,JBjj-,ΣNYAj;fg^23 .<G^֭bߘA\$~4 e-F ED+gVVmUzn(մ N:F)6HlU*dQJ`k 8!XE.*`(^2 L ֨to30$7EhLۜAeveZof}V^٨:9 ~x6 MYȏNR8 +[FJƷY 003ͷ4]U_Zw0A<y/WꮟTGcc҅JȃSq_dBdʛf ȜUk1u\aOnzru"̫$d\uOn$kqyNpz|:\@`IAn/eP^9ÚvFdWK=^ODU't>=_x cXq4.G7uUb]WD󺉏i'^U>-zr1x4frQ~$7irI=|ԁ5ϟOoA2pٱ⡨556>!~_^__?xxO_'8 L)R8H3 @C_1jho1rZu9o3rR|席1;JKu RiMR HihdxTWݮy_aY̏-1-UJDw8[ڞ}d36hϻ^7_!.E]k]Fklܗ/$y@I@BUk`KEI3cGJFK %$k$P'AZa)FC{{^ӊ}8yMKRGSF( C:)Ub=z tN7:Kdgp&&7ՠɪ FIlŹE4=h}K[4[$"y4`%+X]V '<;R&T,v:H%6 xQVlk alpE2xӐx9phN:}E}yuzr4J4N>q@$$)41"c- S`ɚp=[ ARwQo_B}%%cZ)*;B퍋@ DWcB!LhOh>[P&ںO*JOWΖت"u~rqj#V/}6_m+wws?V~9?jieZlhElgۗAh??{>8w-U^^̟䇃YqA;xIOЊ]+֕%ZA[Vb|Hy?{Fڼ?.v`GRѵfml0A Jƀ"soGŎ\pp8 tВ;ϬR(۔MTNi_jrڕ IH[+ &K9K1@_*'߫o]E|Qus׋ND46:5:L̖̭$IDњX#-FUkBM&ۉMoAvi8M /7T^)?ՍNK(g ;qH@3fj0N+LpBg aVM:`W˩M Q6$V dլ VƙuoXC3RD1 *-BgkA()A*CR>PEh $E9_ik50ꌜ]heSv̈́9tzs4^Gt,}.@Bgώto2 P-9a/tC,zQ\ʼ 벻Ы{kiCkľEUgۜc1 ٻn}]q.F&-HH^r2J@!Z*HYcm5} .wwG{AuM9u#zf'& 
+v^s`M9&׃~,*{J}'kNjuN*LlWH'A6%`hP"4NXUǢI]Sq ӭhyU\D "%`%dQ@2#M 0P|4 yg-"P&<#Gr ᥴb0V,97uݧn0~y5FJMQlU@ & KH1fu%f`2 -,;$2^u]IKnswCm!lĎ,"IYKgCM&BPYLIo-He \]\T΂(AY "dA#kP&%YC#<)X:#gK9ۯ wEUk?ޗO,DYMHɤ ZS\<\3^ی1!)ً@"[F>cWA9!dxz6DtQؠ"YVbԥZA4=Nz jwilBB~k2P-K1~jkjd Q-d(08:8Ig<46 V6jYkYUkAF(!H"bB|1NKg}m/myDu">۱-n~[VBcN%cF *ToFQK0R#EgL,g6ţ;X&~Cn+cpO z>956x-4 kD[$q;skqW/4ŶOo]V6hB,5 u 4ѠFwgv[cR}JީȠUGϊ\KY*/D }g2R)Ǣ[e}02(sSIۊ2oC?9ug<eG7*ykm6]*@(bH4V(9ID#o9ZFZ~J~^}넾ug=N ʶGZAZ w_f&Cy wԕ닪o>n+}4*4gѠ 9Fs/ޗ3r=`ؿ/G_;D&jC)k@YIiYHQX( 2@.#N}tVetQ81XsOFD|9v7u+t٫rΧ{V/?gzp5}[O>Jwq]Km|!90EZh%k_=3Kό-كWHV=X4-jj\ԩbbVZ$פO5WKkr‘SZ[e 1N@ `TM ۂY߁_m,ڋ5CpL1[Cd"K=Yŏo~ rt`_9̨87#c,3Q6ZNYBCx3&SKX $;6Aړ`,I[ފpdnҸ$`.D@5U̝֒I\sNjT))4m[@ 3./rb98{F˲|\חGrMOzr(_~IQ^~WY9=55u=uawmvt;^z_\mf*v_!v5wuH]p'BۛW?C 9x>c8ϯv4zsm[Gshwpl7mxﵼyw{qv}x{u}Gi¾.` /c;u=M؝^oIQUW?r2#n}׌߾#}]w3r;(6{AyɿʼWo+vizyV*y-%"7R90up)gw4GIH$b;s."U).,\Wo>urW`T'XV6lkn-g+Cf|cX|rRE1|s9`K?^^"+OcgLU%y蒊`]4F Bѭf1Z*e`5b<gzQ&5D>Y<5AVf}xҡ5|;e"l&x@IuӮolD??7FoF#|w}STP1}KsjՎH-_bƃOfяmj1b)C'C,R4udww H_ȉ xy%iB6;񢝰2!aˉk=QRbG}7e6%?Sb{y];iۀs Cjx{c2.{~ 7Y9$s۽ t4ϿA]~,ֻGR GXۼgueLۿζ@t#A>0o!ݍBx}h3rfUۓޟFlUU/F}u,i[O[hBǏh<.N:Y% ~?/_Q7_ⓕO^P'n)ȲܳOVy}u=xp: `X̯=aq&Ifo .ԗ_P_,f֎:Z>O*~7C~,7c}b=jT- @rޏ;|@xm+7SI*ƿ^/nbd_[5*- [k\lUh ;]L>'V]͖p)П|9zO-K\# pK# ϗs_zqsbԏ+qiE/jl9@n *'v֔Ӭ%QK &fcXrH&s&U%1ZEg\bM\T,}/;[;cIÃRRDSJ;ŹJ6%K2?;;d?RkIМZ* L@UM\j)&Zη*?V، ٜYLk%ё&!Q5&YP};4ZC)Q_A2e]ڦl P} ׂENY2d C6:熩(X>]!B@baUri-&F̄>{' oTGJ<@@#.D3|W/NY."6,zNZ)3^N| zQt*Ațµ4pNUa]0۪U גlkaCI5.'_\-q G%t7p6oD 3!)U>KBj~IА$!pֺK5.m|.$$UhTXؓNŊHZ0dTSufCI,1sC2*uL:ZfB s[(ޥd6eU0 VɎ D@)[rv[1`Q n<+p-i簾nmgJʆI SŁAWϾ"@}l4q, Նm5e<&, qAg\Z5TGV1%iKHWಭQ 0`$=WlTXҴ: Ki9*`FsCiޅk"G !1a.@10 ܂Is*[ iR$L@6( %M), xFED4f$,ӫXlk1rVp&jA ۚ hWNv|օAw*8W0c)˨uPO)0,`.ij}扜%:(dĭ96  b " U ";8DRP 3G(Q Дփ5x&/ {R AEM% T gvJQ.`pq"*tdIw= dop_tT*j8i0 ѕp!8 ]Zh5=))"f_W)|`kIS KRDAe͔2h5>A2Hb7l5x/Zb!z4[ŪnT;\``A#b.n֓ɬ#aR,>e ,#.Z-tgv7o:3@fTT*5/k!Vfj3Jخ}v~E|Yu/]A9) `&%>|"NXRHC%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@G\;3R!`ϲQ\\@VCW!J/I tJ CI DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@:^%LI8{ZzuJ 8'%1*4. %)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ Rx@Z1ՙ@|@[JvJ f1:F%A@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H DJ R@"%)H tx%4Z\]!`Otpʅc]!JN+ U'b]qGh]%,gŷ\%{`遖&?2xӷ_W9ƹy q7O_ng^φŴz򱵽 q<2h뿨a>1(r\%Ǔlgk<_)kH^OQXtna\2O? u1exy - ]m1ʲ0/zk6|ӛ}sDZ]!])o]3+]`X6tpͅ>dQCtut.vȆu Pj&P)}Ft]>3ױl st(QҕƋT蒋R7Ơz u7 ~u#%iv5 2V@:սhSqo@,'~Q`CS %*- oRe7hl+t.qUDi G7B<9/g9כ\ J~i4Еf5f/r'tt\^vnh{mvB)<mOW{iz5w*\6tp_xZt(m΋ЕXE]AFfCW׹\ JtDWGHWR(odFt% +E.th*+#&#B.dZMth{ QQҕXqPWɸ]=t c ׳?EZi'Cie^ź4ᥪ+WJ2PXm+ÛWdx0~x0Ojv=-EoO\cT.j+J}c3M%[U% 8MJ/iaeJ//~ۼ*|kchY)_'4-v4QʶV@k9$Z_\|GGK9qaS.ڳ!~uZz0wɛnui.V]pabPt?:fŷ?Gx5|V7;Oh8PS|ӧ kMpmޢE/4ml/%mi|ӶArڳ"Sjs5(`؛GQ/-ۯ/2;kVuFo'ף8Y%R]{)̊E>9\'o=O)~<Zٮn婽qir)#nx[]X8ޭ@{8w7oBکswt>F:'}GVi̵?B_q#MWoOw($i2|#|VZB2QJZryM)*@SgZXi7]L!yuDxTf^3ei'Nݭyglcݱe o}7ݣ Geu9,mʩFCb6Un;egl||pl3Avvjۆ ߱imh3{u\gϤUso` .Ҷ&Yh繓O67SAo>m00] s歂s3zOjVK[k;(WjzmzxVCۼ!w 8g^Jא??Y9m^я. 
@.x[w>~h/Of4K #\,15F, }Ft]F \s+D [6=?ef;V z vCk^vCl ߁8վMխoƹ\r+@k>tDWGHWBKxFtΆ}wDqDWCWR[uFtɆW5B:]!JEtut dDWX ]!\Շ "JItut/n"B偝*JtygDNt-'DBWֺC+@':B`YEW&*B2vDiY$~H y@ӧ;ni3y}j.GW¢ Ԡi+qkF^ ӴSvئV[WڗQATҧJV[UZބ9 z=ϾxOG;ssi+Jv<=w\@'XpcSn^t>1GHl lqyXngnr$BX+.~**}c#|K/WyUL`JqT'j4XC'_I<]Sb* gNWx7^߲|Šz |6uqaOo<7MEpݷAx2l>uAQ3?^*!kxJh?Y|%-)$m<.gg)s@s3Ƅp.1!+eL>DŽ8&n0w2ƠOOL'$rnc~Y|nMŇ켸vk;w݈ vM.-N-@oփ'7evmqkqLk:hťmm||Gy ܽu)u*F Yc<zD7{JnYM.`ø@ZۭIr[>˕ܛ魯*nl;wNp[Cx^vhov%CϤiWuQsrWތ߹_=[6*T벊є2)Q[k­pgCGnz[M(eJYS1ek0$Q&jBT3b2U2NG傱)D-1JUQ1U*a.r@/..檪a)<>02j~~>>dYWC'B,!柳˾ï`+16Y-!{A4"Zc-km$WE gXk|H.6H bYZ<6bKGKhflVX\2KJYpY'i Q7@w:;NEo+^Qڒ}I@S(|cnf~~g}~\3k߬FϺf^p h~p{U{W;!uLE"Gvx~߾OI^12˯,%j?bMn]VQ_(%N:<26z-~Vֱ("{:|([wFTeQ'cNyJE4BUI&#)@6HD!US)tT0R HHc%a.R`fbr\ Jr"Qc̜)m4=Xi){[-`6Oc`r.#oQG۪aX 0KNRЊLTXde(xReAT39  9dPPyMF;H3s .YNYqREqSZ T 2)S$)UJI<4H2 Fgy g'k2Ė8¼Ӽb#_"!dDY k4} #;+t0>f.in^yM_AtL^f5ҹ砃4/A kS6 H]v'>AwG'p:/kzfJsJ ׭&xlJuJd@i%ȮxIqح4N=iINh Ʃz (z) SS(I`5^s.hhL[Xc Ǥ}=m3 N' l!ymkُj =%:d[_. [>@R"Z;*>]Q5́^']cI)&%$pPT7;9k#j^s7)5H§F#E[#a#J)(D(Gh0HE>!LRE@UA߬uIRPeZ͠V\NG4ode+ )KS 8^Qhrce18XH.R(B1p4 :Ŭf}G &7j%hX>bȁyPl8]VI\͈m7י^5":i|!"B/LBQJDٌ XJ{s[0Sf=Xﰍ7:F?}.fOWEH.rە@ P6Ql&*+ēlN~+'ڝzIr1䏓xYF!?̺{9d?\~ D*mtBwc謍)eEA1-c4;eESRnn˦|%ɷAdOtH ̡0l,jϿS1EeBJl<&{Z2ZUG*ocz潛 WM,EFjsu_ O}c*^(,MH\4څJ#s5>R;]HH#‹DڗOO8'0X5J/^Ncm˜0ByXy+at(11Hi`CuLL."AJf. V^1(x|q`hfGMN) E΀JRU_#x~e~V{>vpTovxه}닔y)>U"9FEH.hFl&#)JhD*Z76g~AiQP6V0`JQWҲ Ipig<ϣ+Go%i LUM3C"s릂 ȄGg_a"y:%>CG P>Z3@.XF6K-xuuE܋y|D6nGRGP i)}\ll%˔Q(j"lt4cDSr8G=]L7wn׹Veշww-gALi}'- MwPןuOWXz1ϓos&Rw3Uo[[OMfڋg<͸ jzi)^Ŋb#eZe!cR.ݺi mgk *ךo wMaVoHN~[$Vg3/@tkA`_}r72׭_ل4gyjJi/j `vUުTOiY qbիGkEmzans~c[ݻʬ_ϓeofuUmOlsE 筃B}J_~DJfN~FL ~? J:OZٟH6d羪/6:̛E\,{b9`ڃ5WO|*X67>V1"N~/}>g?J9ikӕSxEJU< ;We TG, kZwX[f=u/rO;Ϲ IqvQwm) 4к}+SI9:,V.**m;nU<ﴝB"Ո̱2ی!ǥJH%V>:UȆdr:Și bb1¿K^qv$@-5Xi9,F*I| 鵪 -z"fw)8&AXκJ#Ԭo/|7ą'|GIc҆I䁻!cg]}~vX^^DO 6@'#tL b؍:*ђPÞn6@b/([oV$yCNHYTnjȨlJ, B\Wt jP"t Q%B"O*(SR䤱49Dl@m@ytnNrDžkk~;$A뭺r0=hbF ʗëq𘵘OfW*QgU=y{}ͪC=tl *"`Vj>@ ΁ۑVzv.uLJU4}R((gsDWFk+tDd .Qӥɒ:g 4w/EE,-Kx^< t`a%JM>dXY)|o&Fc(?Wfg󏳁jSҪ<~jB}8 y%**>}SW̬׻{̀.W]e=Wh~z_ )kEߦZ`8otHJY-2Y&)OfwB4zBHEŠ4s=h9%adDl,IK,>9&$39* ,S QYDJ`D 8Ψ)tg$s`BUl>YɣWgW_=bzؼӽTGl׽uI0(|xe_|q;M[mbuW2v࿱T[/!ݪtCa|$۶jG5ncSVz>Q~݆}}aܥD_}uOmuw[OۮyMCKVosq=DDP6sWV /}ZQF? ȳ/PvmmlGٵe@` GcRv<9ODDTpLNb\ɘPQ9 ##|"ݑN@/>/rnUOӋ݈   bb} z*m2`9`D })b!nJ|M6Q)hJFd s;a]29]b."8b73gYڢc668FX`pMdC0T隕ʧsB@60W@̐1 ǚE2K5GQ _>ID&x863gzq;pj/5=80w@n,Π}kO=d& 9FUm@J Ɍ?mo'o6o{#m~yZ , !};C=@ad;E A[FSj(=cB҅hzq*O-"ɠhk1 #gż큳!%iEo:gnM@әC~/V 9hsE1$hQKhBAp"%%1'BKX@@!5[Y×VWvV! )$TQ}LjY+ȯmm/6d *r~1.lmڒkKC+R^8Go͋\2}м8q@oESo5#K"g1g=(J*/RMV7g7Ev(ԲD:τFcOh>\N0;-D!Ra:R0$49+:Smz(hQ_9̔tkavU^&ogaL˽/[\t\)Nr+a&or?Ho5 ʣyEJђ||-bl|ylC;f@LOT)J#U*) hP""z:J&(.qONJ2Azq++6: 8cƀ"`V-Ee"wqKWʨûo}WK*$,h! bd*(SD/1jsFPRN2kh}+H :9|yK6FcJ2tHI.KNx19|TIr 48믲Guxc})s9;)8gӷgK (1Iy,7s^Mqi{g*XN~78eח*@n;PG@ U{J0'S @zzCN0`uN#TקkɿZh9rVƁD3h)s:pUt"sQ2tCaq>VxWJTOTb*Mt~>C_*!WL Щ_ln/%O ai[\'.̌\+gtQ?Ck*Db1:ǕٸG^Bx7ni[֒^[^ ] 63(oP`C+#G1^n2YqaJN:e}sx?N#7,}a,* NG9c|T"OQӫossTE^U懋\`/ś?Qf) fc\l@M'4mjۛ44_ixWiW6%~)>ȴʙr[ c_o? ?v>MR2[1k@p=.$ ?DERE ]ހNr۠xIhˎ$;PO9@T<ݷ$#$g#A$$\:C:J ;|n4SQCr69i.,RET `D啕|>ĀÁ׎T 蝤hMy6Pa dR.$&҉U{: P 6|Y*Ja6L6*cgtYoſ-(L>4Euΰ#Ozvc#1e'm,$Θ ,x*DjvKM&܋YSVSClŬ>P)t /csy ?*uZAp-_6ѻ?D2틶/5/~aXՃmgtϲ\֙xcI im$2ԜVOi2ZP,I'Jtk&սI9[x0kIDGJǨ-%hDhMMLZǺ6YA+1(c pBt*0CB)bh'ƈw" Q@8# LNOHUf_:6WwXX\7-;"%r2S g|'$"BB)/m+zFTicK?j'Zsϊ,o[fO#ۛ hfr] Zmn/R ,#dIunFyj<J0A ˽W:hItۤM0v Ҟ5Q2:ZR⤶RF OLƫ)ٓJк$1h1yGj3zȡ[zix9Dμ}ey:R9<9lr/O7}jLӤs?MO SŽїJeo͞-mel}! 
Rvp㲿d*4 oH9|b[j`xt lv >F;ǸRP dE/Q3#S3\ zxC|X9;8m,*X '1Jv**M?Ζ\o42pid%VY(B%T^x-{~h>sxt-ZE-h,XUApshUAEfYd$7j~AUuC8)(QN-JѤ16&,l>-NdD򓋉hoTkS xr'2hZy𛑙ʹd>Ӑ:D-D3ߌT ٪oF]@O]]=`JFUWOVi3iTSW ꊵjݩ L7#FǢ2JT3تݨ+VƠō 3[͊\E]& /Qùr֗ Ju0㵻 !ޗ*=޻RkJ!4 މCB)6sξLí:Ef7w=KS_^X;ѳ/;:9?YȮok%ӌw~#%@IG`ZD$9TP I )F"-B!ɕn;)_TQ7ɸӋ5t}%3㾿׷qU6 BFun! 8۵J?!v/m)J 'x<8(%c`<ԃڲëo0IfKb)91!ȒEN ĻKXZ;JxRSEKYQ(`Rd _%-ʡG& ,tѥ!Jk/Up-5=1!}v3|eΖjը}J{;9)$J0iA98L4:ͅ` T;I~R6HSAfhBpI{Sq( gzٕ_cnpRC,};U3d)$l5ͮfW+RPĮK wEv ^ig}98?un[En-=*VcXn|#dYԳb7.tԫKMb߳U+[z^*lg)zUf'1]JŨQ'g!BjkU 6$)NR@2JA0"(x!t(x22gѩ"c`)IcBf*|A{!,oEʢMDG HgH[Pr3cmsJ:S+MT7(V^IՔa6,1@1Y\o+_C\orpZ}Ұ/@> TH` H Zt5t:$+\[xdF2*yWO٧ DPʩ AJBi >팜Wա}C8Ia׀;P(G@kIjMIn-JĔ% @RU;؈ 7Fmi܆ܟ` !>CUZ EDFyGLaWz0Ri#)ѲkN,SZ $3r\R U@sAh |9;|"ݱuFΎr՟@̖/ƣRf+U"i~2 jI}F*6GVt)Wz=  >i﵌4[%PlpdP$H[B4pQ'~"3'ɔ逓NNОr:K2.kq9!YY(V{9ףQAqt8yv9aVg<ѷì-]iGڈlvFVt7e,!P7*x?7n<"LEoW i_3y𹎅oKaTH SVËm9R2JZe '| -m ݉Ǎ1/19ۑCAj!Onx\{dyk C$CYyV:+kPb[7̽ޟйu*KVG'an08с [B, XlEj@VޣghSR~WToZ=JIH (}TP D&K&bg`"r,>2MFѝy&pf=/T_3_ۄ;yQPtJl>momػF` zo޼f5KOP-e3ֻ&#F21[p& E’&d)Pf.E(<!!x di$>@{ҽُZcׯ׃zȷzw/kv#?zoci%1(΄ z8J+I(ΤRonxyI!ƙhAtgrvyʗߚ?8rFm2TA:9ZC[Ղ%2Q1LT%7J 6 96=+Pb)ma'sٚɔ@DS TEW$`KI&w|9;V^3]\OOG^Z3 b[wr1#v3>.Y_a›-}9tϚJdvzFfL؅w|2k\9i=jHImu~zd, nn7O9nfN#,ui7-oo|VvP;coyu?wyw{eEcVz{by X7|]sVOKG_\;} FD_H!J\~ uAEFب luV 3R'EU|"=A~jԽ_=VjiHE(ITJJmv^P  G v[MphhgnJ}|fe|0XZMX 2?Nfq29g|dr@k BK6 ,ِC2Κ]\J`1PR4^4BUQ ޢ* ol$C[R{H:(ugGkÔlA:("\IR"*P}F+kbKlHPAƇvˋCg(}hJl:ݚ;xvzrq 5Z<㫦p8L)*F1JeAjgPR 6c0,91!A{Ӎ9Ӡz֪A|8'l #$XlZ9D2m KL^#+F[ <y:<\xZA€)7^V`dn3d1} %!,MYj<6RK%LןOB }bjta8TchQO2Fv'i;݉^BW^ Z]w8aFMT(φZ(Flte 7)_- jj9Ι9|eQB9gb.>E P2/yی"S1BrBε]Fܵ=K" Gcy,U.k| wY9`Ҳͅ<FhkqY/f}U/b~ Xom*8uj>J5ę_y3?I}lpo{1ǫ>7=_m5nms}Vq848΍VT6~~_>ĽW]'n^8/Wvyk ޤݾv1׳r[m_}y鬲2o5 ˝H4ec+3|e2Mmt/YmOo~b B}k5GrG'6p:;L>8m-W++u]5rv]#}\lD.Kax#>M%\2OLӲؚytg$t9!{~%L.N.E2b۲Cd`|ZZ]ǥ4z=]v&SZJޒPI3Xu֑Eѹuz9e˱YXu}g:>ýSԵx6Nq3`׷[=vN !#Mi7A$A$E^STblQ\ ř4]=3ƃCcF 4$vZTKt& HaC0PTԮT&a֌m㜌9Kipp)j-ee 5F &팜6)ZywRr¨ \WRuyy⴫ol}roxɛ͔@ D1 MM':#TbF+h([db^n΃m{Y٬3 8\KP|;)KDftզJcP{hyBXwb1') 7&x -iA ƭH̛Up O8pkmM+ϖR69%3 Zbc.%z!x)O,\H#ԭ ='p%&S1:d]% A-HՁ+ʘCW6d7{&E^}$b˗ѿ76%zȾX}ȯJp;&Yl@6_Fnm~ϣ/Q:Gr.ONiѢg1-R3@z)#R u0GRx|N3Vg44kފ9`H29,Mm:*rݒK׼n'̿lncbmHwՏg7/PNWrв\1"U0Vq,9TabhبdGc%; f6@21ĭlH PB,(v$Ң0Vp1{  `7̤H0Wyu#=Îʘĕl ^st.9i\94\%/'<KP:sƲL3 '6 t~u{^ض9mWZWK.<+`U n;ӹΚW]8dd8]Pb15$!*Ew輐ܫd$,g⌞ueEiAf6L\֥ȲR ke֒ x2d&r蝑s=N$ ]_By2sEVIio?&vz{o&n檴=| 9Ĝ%UvE=I vz|a@n2ԩ j"7irQz8Q2e&HiДI"ITi,{rAxp aH-YӤR*sdlJK>G@L3њ "Gp;+U^uǨ3rn~ޥHn_m!b©ddAy! t DΐH$f2\&TPdA$d=PVP~CSڏx' D;^Eo бvF> ~ˮ-t'lGro /6:o4;K צ nY{MP%CZFa0q׀Gq21@2*S0#1LVhr{ʑda80 ѓѥ9KgRI'"KTך3r׌atag.M+ݧglRv\\52 o`25vMX(ǒYHE](T(L f -6@% C1\=Em m^ Mvqf.mV2 c?芜5 9sWvgܱkmk^% !R(Ytt1 )s r1 VPxG+d!2IVtɐǠ 8ÜpT ::ևs>lywE#vjDٱFF5` (aH)J 'x":^ DsGC pjDPA%ƉKv%h$Eʤ#@4EH*QԵtg\?%jOzq2K{qɦz;֋׋^2[XU;Y!EJ,;fdR}J5Qr͕aQB/LݕcS}qkJ=+5Fv|][7Npy?6\ޏ`o\ۂ8/ڴok7y{i(tt2َ64N ~3"yS28ȵ^nE-;hxmGK4%h(Le:R\ g?gֿ .P`D'רDJBg^t<\l0iy1,O eTR5a^}ѯ՛RTǟ(y߭Z=LLI|S6Y* ={9ObY~"n ROǣOh5ͧ^h;k\CZg;eUZ&'j/50N4OVg{&9_{=1bB^P6Pa'Iìq:r1ɔ A_,ǣo$ ) 弬ݲ'Mdk)Y+ؒ$υ K* 4Y+CI&3Sq LЮNwfl~#4 xe(=jen>JǷvmh ̾'4{68%ky},<󹙗f.o>`ATKjR95ÞI!ZҜ<|s8& 4>GٰYgբT]*n  f﮲l@\r/R`He䲈 un5w'"FF!:]boc9+kn0(M!ݘ?fیVc9]L /ȏ ǿuG \b\F˵1[ JDaIOQR0vǫ5ɞsݧIy~cmo< @JqGEEQymZ-jfI1ZɈTvxykxɸ-|! }l02Y :$H"05rc-qP`F'-X#oBcJLA VZ%؅<܇!jkRZj)et|z}y,(YDT\:ջemcw{uk06&t[uБ}p*AN A>WF0qULLyvXB%fUř(U˕$Ύc~T >4\IԤ`zZUߦAn{,Y728}Ths9E>{K&<~>8º\09[k<-@\J{ a,@-?ZD7`!WXZv,T[4OVD0uU{ P{. kGURCB9Hx~J(h Xn94 ɶ +LTꗟTѴ}4h|NwT ^^Pυy AۛJ{.gtpt5^M0 +"P3ِl7W&s5&oރW{,652\ +;8;B<&f&ߨlJ7O\P{ z̜B.AQ+Λ9J=nx lr^5uU5|_Uw>us`+yéO+s+nG.UW3nGH]PWJjө=RWP*ިBCo"ߎڻ>R^]*!^]="u@qCvx:|S7 EtB'Ӱ^չ\k#Of6+a I2Xn. 
Ct_]^VCl\~FAh5:_ދs빞ТfVa.^htSWݶ#Vwno`h( 0zu2T_bm\P1nd$m]蝖2S12 B@)3 VnF([ͩ~Vʭτc\.9-s;h}qnrT.|ߜӞ~֣+&4>wןmgB\'P׃MrytܜUḾ}lݍ2eKoXKB (Yt|Gz3]|$qw2mDˣ|d.EwdA$XR*O@lYV< C2$ G9*l._h>}wLl8,!ptgL+GA Ea`L`f)1NDyDB9LP{AT ̑ಳӴxf,1PDQw, s[鳱즰  2L_y>g2-41szjU܅iU*`z!Y2̢фjP"z' +t"Yf\HRw6uJfM&x:[ ڐUa b}٪HkگWONp5ѷW'%Fr-mzLzU$e$78 {;z㾄H\BMM$ʬcz[IkVt?8,@,>G%ͤ`/Ӥ5t&)!]c$hU ernx*' 'm4YtټN6L6h f ˲"N;}CmTml]Ė(+|pJs*p}^}[]/7xV:B$8dF#dV1Hp-f@u2TJA+^̑f)K("rFܒ$e&jm W,I#I&FmPGN=Yot~dmAT}0u4*%.; <114X0IܛCHVQV <@}[Qw/<`<7SG}+T O*Y'D;qggNLxU7U NWr 7!2VCxD)ҚPd UGq8qq==ìpրiDFQ-ITkIdE"9 e5MFzkQK=P[zM=? EK|gkV:,)DLA)wfo^2SolFNŐȣ?X*c\Mo:zÐR:g n|''!E9JPR(M@lB&t;4=_(t )O}(D*3@%\( \ᕐȈܟ -dBӂg'ţ<tek.GUz (O"*!'S1'(9B [ϐQO{Tn&xaX{1]>/u Rk ]SM2eꭅ0K[o *腴#a 3@VزWS eY}wyz Z!@S I~i\e}pشMB8Ф `Z0!`Ƚ^i:Bwv ݎvC{V:^Yua;/dj"L.!^߻^7_>^qWH! A< |\o=\USӎQyɕ$Ji8r)t7O!*AP:xz=R`y܀`Z|ͩ!ă\0(93^L@q.Bot3 4?߾M ._v˾qlZ w82ُ/i_x/J A"Zr,dA3>`&E1 @ Q% }{n ިg:]]-R[ޫ'jJCk5*[lҰbW YFXjǝ_c/KEuQAH[ uڃ)SF:ZD*R!KoQ+:|S]ky(h3r I!* 4roi:%*ZQȳU[k4Oaqo.|g?j<>W?Jf_ܹ(t2'0!oW_~áӅR1VA|ܠ !5+S/ rUBfє%ՉFI]"^=={!Aśd1^`r]K`z]ˮR7yֻ;Ll)r;_"3c3Bk1cZ wj(lśM"hW 6XSSY/!e=G8|d?|ꝻC8~ݕfH3TUr4xTt)%U:@Pʱ>} U-Zhч␕]@}RԷeww؄t'}& [Pw8aJ,Hd5$QrBro, 0S9L7DcS@%8´ ϵ.&"E&C)lA^E @"Mv릂윫bqW4: Wg"t[;pyũ,i1|BHX$oFX} ծ6Ĕ /ΎYlytF}Ok4. 4]\zrq~ouibb/?Ϳm!pq?Ki%J)[CX!\;kBՒL(G^\i_MmE&bW^̴a[\a[㣲&LoӪ$Z Ryky w:OTm7* \sdz .f=خSoƱד,0qeFn׾.%ͺFߗW7 870wvģ&ntVO/&ɨaI?\(hjPV*J.+Ӎ]ӖyEM]H嶽o+9TP͸Gվ$Ht~/z\M8s#+WNv,.Ӷ%ګ[z4 r+tNkbfgmϏڰUoumnnKet,h [Y>spۣ[A5~Vߋ}P4暇:Y6_?,ǿ)z~ۏoyϿ&޼Wo~Gq"#p\rڽUpgy'AW~CӡWަiCxMgvWyW +4h2!_ َ VC6lLްҞ/c"C*)Xšr2V-h]1 jmg#*Voh!A \uJ^╩ss3N =\#JKts|!Z-RY,ñY [Y % b> De~2w:r~9,ytDl6fzC$TdLyN34l3 x34lp +!:jgr$eN WʣCuRb)VMA(Ia@,b L 69:sSkM`QuΆ~~ ǻ\#P0J;m񂊲ɰ&!WCQ).zqE*.P]A*p5b+ymUUQ[QdSKrgE&͊Z޶H`4_ģC>8rw!>?}S[˚<^_WOZQ-7K{ ןmȭ-*m;ex ӑ[@"p9CZʂk0x{ҷޭ#_kJl"XjKc3$Fe&͖Vf ]mk wƙl708%?А~+x}_# =z8ڵd5hPD(uꨓೠ1VqLؗې(Мj$Nn309l٠klkj T/\87[ts.Xvձ6`[H.Ȫ1)cJѐr"FR ]< .562RLk[*|-xȂ ŋΦx5\TvhNci>*&8vf{r70"v[ZDlqEk|2B-آFWg8FHi8&/ kK}a Z2(o\D Wa6;8eo ,F"Y$4M-ǽ"S..7tmj]t]l-NlS*n:\vFdMT< Ec0}є:vK~Pr]asGފlO8qFvEHя я+xHd {=o?8E5tq8BO )F!q,@Ȕ6,NĜ@*Йu=ըaP2jbXU鈺Dˊb*tFĹe Qy8<:yLQ{y#z!{VF.9ŴZY@6 q+JͪZrt>$ Ⴊ,Ù"3J&-O<i #.*:˺.csJf9U]Ѫ}uWDIS3'ҵM68J@x$ \]g32)YrQzJK};*oTX&_cA2ہd!YZF+J1I9/KQ5CIUIO:6";*k~ ==2^N)b> 8P6kPEFTo'SI`EJTVG\@mCx.dG{,'Z3G5{ 94-HG[h+ni:0Ht|EOu98?ޛ}XTd9ߥ>Go_2,ёOns] h@;{b *IţW9N͹vlEDi??/$5W5)]l'y2ke6ǽ(*eWvN~{-6cnqq%~Uya%|g| ^μ6R:K'jw6&nV:yzt ՅIyq:@Rm {N)Ghdt(U '(u(u.InȕqWPgvɐ p KB*ڳ Pޫchf5 )%WlwR#c&z8Eo>]w_0bn5R"RVk11]jWzUYn8:]򞠷菉IoVWP[b"1x}s^tHMMpq[=!˸vWFh04\V wj4,eeLఔ{ryGKM6 \ NqUtҷM?`1`ᬀme0.}poL:]9mWcom'kj,uf䐟[,:@)t|-(skI ZD2XSa&;N!GjKėWI܆Dߚq:@;0X' '&O&?SOi'4)픟 ~:檉kS1WMZkn"d7I'dO]5q)<s%oc7WMJc's3u:2ֽ]< 9YHd9w{(Nz´3e_ZR_͎_eMo`)|)5xqez~R4>WU>[OYIM?,Y 듀ϢLt́KHꗬ~v{뤨@6!֌6 o{D:}oe7t˝ZiHqbt(^:kOmOm|7?<o ҉Nm\E?~Zl C) UVL'_n,fjfopIFoII`vOg$'xU1& B:$X9kDC ZIыmt>P\"!y3jGv#vcAN< :]KBvL`JRXPVMo&APeY)}#DQ Q6tbrk^CYS 䔳fe P>+ٳ,$l%tU #-tZ>yeO!CJ9<+@5631Qs:+H7qUA&q*K7gN+5UuU"ʄQ6Z~.*IYCY&+x14Z`+D ƪ@kә7=4v\&hA\auvxpX+ָԫd\ SW~BpQRJ ϰw8"zmNGo:MP#5 1`E)ޮv#:zmJ Q+`P9@@?ʪ8KNaBA21:|MH/ I dv]PS Z{]0$bt{c^܌ ;C̫HI9 um<-ZmSck)lŤo8ت譨7Ad]Ykp9iN4TZ "0[M mi^%z++ǜHqC#qk+zUk]P oEJWAS>(r*T'Q1zM[J|jZo[hN3@q#ldFڶlC\6k@͕?{OƑ42[}` FB#IJ2S$%)ZjJ,QŪSY,KϬcD/ׄz :FJQl oE`0鹣"Ʊpo,ZYdҝa^Lra'zgrU>TȖ(hJa 2NUc("zᘅd_.:;vuv x6-p9 Qɱwl,"')0e$ZD[sc[w|ʔ6z/+Ɏ[ƉАd'q]PRgSqfXk9p,J{ڈ5=zt at`Gg'ء:5Ag:3U;lrߺKBA!9ٙ>٦ٵίX1ڋ@IXMJgbC,0P$9K{ >dAQ\6,rڶ?ņ`A]>z 򫗸w%K|X٫}E9\aWÛ̶W G1L7u_'Ѹhz~y]ffvzפ$jD[AMG%C{QPр;x'KM(VUwz+ CSRRĤ43i@Ϊ"|&=SNHoJag5.*~7 'E ino"V%9Tmjˆ0cýg('H/ |D V1Ax,b(F1ZleF͝QFG ^T PB*:ԵWg܎k/wiyol|{o=|fi9m9d&aAV{_a/_ ùC:#DZ#1 vRhx[/<(i|Qr )N(B:W>' _aP J R\yەy2i*(̑ D&HuLtJd 
=!"}Ogh4%n9s$uބ3pHd' wMyiK/f ɻ-,㷩7D;v`äF-]Nt2'tNH@^ *^{d ,(1fSQ){ O''=<_cG|,4P","a9GD ʓuܿ?7#S)%L ;O.(>heZQXUSPxD01EU8M`Ұ#} ^-Zpv[ ! CIBQך`=K]uՂ*/@(xD#2pԪ(sH;7FOz2d.Tp(͈p;T2*ڄ༊VLj/C)@Di ޮ s*o˥]viŵ;NXJ;2wP~g✺VɊjz'ep]m)!^^04-C+5U3av9$ғ Ӏ7P{dp6}m0t>D{\/L2C$!GمVڠ`d$4M"J5Uqz|Fls* \]0Ü9I^FNټLyqǻUᦉӖW:E.6iKSNfJɀP anЕNU k6׼=*~_>ϲ"[roV6m{>J|+g4IÁ\Ɠv4k/^Ҍ<[bFG չb8O8:'*O-> 1Gրԓ>1mpЌRZB1g|<$z0˕cϼH[״JVbFbCh/]ed;ºvZ^oft> }R㓼/F>̎Zh =h~EP965)YOk gIʆ+iգo%N$BLFwMY{<ϼ`{Ur]ca|07-ڹSw`Sޯ1%pU%jC w<lL:kYB&]tm5UǤ4MΡ2,NVk CsoH!^Fp3&ImʥA"fF7NsuHZ*t#< ɽl8ّCX_}hyR5-8I$>Z#u#kJ#bO]I& (i0&w=2aI:vv6X[ՈgStP.{Z(7+)q@;;O\=|zY]t;3zwSiD8,@r!MΊ:aosVԮ{N+w<2<6x-ЙcJ9bQVCX9¹$Qm '8`1waʽ3ø)ac3pt MQme>W7>9VgL-XFW@h]8ķW*C˫$G㴘?ˮкŬ|0kʇMRB<&94sg~gj2>}zv> a-}u?(W9EL$I- $2(AxCWZŲExN] {Pi,.ӥ_t|tyZ?xW [;б5H{SAVZ [F){B3 #.XM}@i+ =cw;S-֗PesNSP,yGD#E)#O^jB jR.%6!`BL1jv)hA@ 8z0 {[Li5\+*vYcY@EIYiG9ˑOCmE J/A A)9 vB!L'jŔ `\H"]|kuн& )RPp-=\D22(ĺ޷pw͆X[qIOu61.|{T+̋? ptJv#Maupa9!qDgh0 sT y|#> dÅ= }e(}wZ&2gɀe ِSD&-l ,lD uYk~+apP& -,N\H*KX;w6919 7ilR1mbb2!-=m˔hyU?*$3,REāFT4;>q#Ll&WS.&&_7/gUl 0QޞTJr]W#h<|S7Xypjw+ z/7?ah0^34Ul9ro3r-Dbܬ܂fMg#`eb/hӽcVW!?.|:Eu2F@s0\ w3 QM-Hly">Y(0­1sU$Ե\G][o+BYL'_ gg23bby$纘ŖdEe9J7EUY#뢣I&JHkA\ Ƥ!J]bvٰ4y]=_qacy<)2H {2Y+~J'k뉫")0JtڜU O9M ϕ; ]Nv@yᕇ7˰m>mgE:%!tLN}'If$8<"aqGJ1(䙀KXeE (*PO5C)-3%6R-c3qnJ3[lflk U[[mj~E +2^_r~O}s,gޠxq|Yvk'dXhc SOUP'( 5sia!9u0TF:"j6tFf_:ŢL$]ITDs}Vnqr/P}fǶV[7z]7Mpgƨ&Rltd:St-( 9; m$2#CiE'MmM&"cH5G8*1&6v{ؓezj"6ZD""qmIQV{Td Z3D#Lj2H*xId 5̍ŐyU)rRb& cvn&Fe}l6K]t]l-NEmU´HI*XPmD>賖A9e xvqoJôƥaԖg?`nF9ٯ3cG8w7pQ[3oJ 6k~pL71{IݧOZ1'e3R. #8Bj38B`#dpVJ.M)[,I';QI18 )g֙]2 uf C2[ Fk CڨrEE"ːK"#fl9EHt>y=oNkrLӬϩ<z8}=&o)*' N"P\fQHEfpB'@t>+YzuSP"2oy&7U'Q󖁻>w5NΓhG, nr`9 m2ɧ.NV1MS'VrxSUwTUN|g=X7 ue)-}>.QfMy]>%pU~7-⸋t3HbgŪmYlyc@Ɓ􅀄29*t.QI2AG PbRblIttdjR6>[h @#~d󟩏|>]6roWU~nu|t]k>cWD5ōW/cGMp3CsR(ϟ(<00 y>\y;?Yc!)m,sx=+E&H@{=b* 2wJrUl6q:F5#\@E{055T-ˍ%z!zVwU?(4^"l99.7Rs4ԊMvTn\B6fnvmD7~txqxrZgϬ1 M;3> XMLG+΀U' E]X;b5Jլ|;B ).sM}H:W,Uh|1T"K%r~bޤU+Gi"F+iw3'g\]x}IxÝQ7<|ߋ~Vuf/Vy z5eHS⥄4YDNh)r5?'ϖ9]qzbJw}Ζg k٫gʊT$:T=XXj!S'FahRH _gBI-y8&:/WR]n4;:<9]×y((di}h'yn=jұ1W_awIbIy\8lԉ,Jc!mJQrb\ZPj0 a؝a.9@@f!C-r5HG+<aJyqCwc /݋XW_#tNmn\'yt HSzwr][oG+.~`,Y'18}FwuE"e^,+翟Dq$jItip}̈́IĂt_>;,xW}?';iD= iOWHS^YQ4 zbr䤪X]ͭvL-Nn漱vjd.c9PI,S16W:Vڎє|WM ^̆,F2YhmޔA[g`<Ͳ\9:Npko͘?n}:ffv\n%Փ X]-Ef5g!|Xp)5W~];Y)BB^5 J7yz~8voy0Wv itج*Po40nFP`}ٿB]1LkUf7ezL?g̴\ %݅sWM p>}i{&i]3O 阠rpZ< gq&̥l:y>G|5ugfWj 8yY&7na[_?NIIȎ%I@ǟW M>'9_8sR X|9ײI(=&Kp"UhE:g"#jN(srSGn/=^;Mnx9!m稌<%FzWYi18{\>C>j2\{* O?~|~޾!޿_7I{g~BށkC/_1-547В|لo3*^CbƘbmw///r$$QFuևȹ]6F^tVǮQֈ$מDA``),)^PMg`Ϝf4` e-AɠJʃ,e=#j ZL Att1rn׈ڣ^;쓯Mazo֢VfNh1n'T'Å1)&5z>Z(½؎g?niֶkȭʙ17؆Q4ю[)Q)n'ج\kKP2T"pxuPBP;EIe`=f!f!~B%)uZ#-9(`xBDTroY,L DL|.!\0(9C D1%b #bl9>E=ݓo?rqcCF=755::mVޏ_{*MeEEIqs햃H+*OOe ιu)\#rqvb.ĕ':'IQO\RBRR^JD& i(or0lBk *R%sm uDA$Njず , a~E| 0sɲ\[YCR\k>7$# 9Di ;e,2R+'IP2KdldltV݀%b7;rr&-Oq>Oǐ}wճPB^9;԰AZ^7k7~;M8=x6]މ`N̽fCo\0c:w#>wmasԾNݨl̴.8Ah(S#eyG@Y!^!g}II|̱_#[I}tnk$tvUg^m)^vݼw^[ F7̴N7Wͯ7Uٕ__ڐBd:[{T/TgӶWilVx60=>K܍{ 9!VӋzwߵ-YfBJ%D=SAK ׉EV NBm5Ü 'wNi Tnd|& qWi54":cꏵ>#KLcK[\Za;Ǎ4FBMG-~omP *X9o|=?$5P75,ΔWX-wN?|rM8je8<&ᨪ'suk۵`x1Hȴoju{?e5#orz+Vn˼ ;~쉅u`[]o6nf+=Xcr",oϣ??P*&Þ}m2מ3W+JOpRwR+Ir;E5bȅĒJ&r;g}PU`8*iLw]Zo/Ͳ7+ {כ<ߎ&s>b?to'fO fW怷,=]8q)NN^M yCtA.:Qk4ID#>ūQ%٫سkJ%qdԘҘMgZZKMQ;m Վw$\W~ܹy3{L=>J#J\ϡ|8;~= &'!3. $:_JD06TF4=;Jv|F=uݪ4Pdow"qe4W^`R\4g_!ٻ5 m-s \ T%:1D+@KFSpy9IKI<$zpͯ 'ݤ+XZ. .יrVP 9Z9{JSbFČ74%riJ<j&;K &m+Ģ_ ,JI,@za aŦ jdR.xU!aIT[0kONe/mf ]xs2y+@ZhNA>! 
-Uʪ\s ͱ:3 g[R'SY3[q\ 6VOtk-4*UIYv,SG\TdItj&׹H^#}KJ+.^ؐ w]2g`jC'WZ B }Fᯄgܞs(>0GkS*zSQ+jЁّf:Rb۝BorD796,.HdKHA HI:^fQ%9KF8MFF!Vh=&lG8kdI>t\qEӪ^U6Bj 7 ]|08^̡cr /ǕopR\'xA#ry8K#C(ndQ|RX!`F,zg'g6^5Ke۷xgճPeL"ȜQ9`a,St6K ٹX]TςA[B  *(\ mR_DK_^gyz P^P}MPh"Ib R5hU!'WAs! rrxbդCǗ"K~OYu%#E휖!dMVlƤET0(lБfV~ bLLty2Iț"Ejɲp~hX+$f~>YS U4@qQ.2k0ˬ'[YmH njmQġK*մ(bej*mEDռtņ&b,7kg\cc8Xc]:;4LBn`p5_DMIlrOݭ(!P7$LMmwS Xjsjha!WS^)I~{HOIJt$\KYR^)d̦0~&6찦0Ï,7vWoQ~3a͟[eZݜ$gPqeyg`E$ޕ{u5@Wicr8t9);YF/֝Xk5ɺNn*uˌJ+N֞,-'T =iɲM8|T||DGC'|]t1s%)>*tUzZRLDmXHj\4ͺ]I%IP .̃vtpHbVK_v1p]#yT-.\]_Y45WMLʀ9ϹaQ䓪ѡV U :EKGMR ECpc .ѫ.)WK Q T57P ~ zz$(w3R_~ ݟOihAui(QDC%EM)X- gpV.7蝥WG _Eo?XBύt. ]6ܗv] jX^_LJM\ÞlwӯC/?Z^'켿L\^O/{D}_f "*ތNݛ͌qj厉糥o`-E ]1ZkNWҋhˆ+E+thA%ʉZr 0 ]1\׌3HhQQ]]Yzp[4<nhZ-zx|ũNL#g*r4h,N$#mFZF1.O^g4dzWDW "pُ,6;;-*|EmzJ-$u> U)W/rʵP-KxzjtoTҮKhn`Ђ}Q*= h78~NCtŀ+ 2U"3z]5NWSoTfdÞj;ϺAWۡ{ClAWfNVBKtEiZyMt,t44DWvpm3tEhpc+F&JK#4DW fJ![+FQ MŮC"N>v(DWHW$< ]1\'Z+FG JS(+bDW֋ч*MYW ػfLт;]1J3' 8mp~-K܎r̞^c }GBV:\>]ż{F6g_B. I}R{gYޙ茓v:AOPՉ!| "v"FU*&PYk'Y~,o4I٢L1ҬՁKΩ+I&sI$^ zfR+cKxr U+<1J7yryji6bִBW.CWSom}۳u]DW[\te'zK ٻ3\uftE(11lIuRBWVԓuut׏0vA bƎS] ].j5Uw%Ltut(= Q ׎3Hpe; գw%N!ҕAA{!+L+thNWꉮ,-mdhދ oWa+thA%ډ<'.?%g:9l:Ma$hI4{`TOj.W![ӌponpF5z7QiG! h-pCAde3th;]1J2/\r+pt}W<rOwCG5mAWnN4^]`hujt(Mtu8tH@6DWo.b+thP&:@ESϐDx(bڶBWֈuutg+lmn;~t( 0+l۱39m ;]1J9YWHWiZ]`g 3vv(|Wm'p]3R K-̈I{đoxME%.OUg 8{c)#4#/ֲ Jٳ$T)Ja2 1jR)b]C3Zv^1\݌'h=9Bj^'wkMry7ji:τm^~;>9-Vku.eُ?.;NͩkoWh}+GkBOV뤳 i}M/ ''g䳆xRn:ίV7H{ Ry]UuP iH%5Væ tF{:3:%&f7'ߓֈscO昱Ǒ֑ ~\o3dTM(- - VRjH) AF5K!RXuݷn\ D@KAOkh^VIC2j(UÆVع9[0f %xD ]0άVzk#Ҋ IeOhBJ舭. BAnQxi,h**u(:ݡ-!x}9րVJʆE BRқC:_|/ bʋucm쭡lvXE+.L-a7tGV- YZec`T" 򅪃` HJƬG6lmP6Pc.roPPCoAydS!\baIp* Ӥl`_Q&t$v x%$X(F JH]鬇-P*ӫl5cd$`l*zSJi_t#fH57]Ґ?;1`uP/ BHP%D&T+Di?hx"W X)[K1 z,,x/0M;*5CJB*)ԙH͇*QQ r 6lC@WJo7XQb:S )tmw,BEgTy'D)J6NZA!N ƺ]u Rq*.g-^TuAT"V_|+ܣ YcIk0 !1T(`5R%CࠄfIY"1A! db|j_Q>VqC ha!:3G GOX'f1*Tei5 !pBK.}?4`Oolzǚꊻ#ZI [ŮΎ#D m3bka'a1=Cw /%e&d6Q t56b ڸLcuLC=GN .' 
) >@(E&rZ#d^SP>KÙ.Kk,yOG30}dH֨Vx$ ;< cև *YȩՏoTG}^ }T'ێjJz%B4Da&ALv|_^_]D]eb:\yS6C@FD4=R\;<mŢuԆHT*Qw}TPGQpu $$%6Ex@rFEK֬kdž@qVH blWN,UhE tљtv4YTd52 Z_zkJB82ߢn$l& -/l5S|aȵ6vٗ`z'O[wW X&_owinozLvf`.`5BgekOak0#ೳ!b1j1֚cfԌ>@|5@RMwUvA`EHn֔70hlCiN3 _B\ _RčtX`*نFp*qKnC)cV @?LE<` \T*\/R- ˱XTZI6TI|Pb٥&ՌHPQλU~Fz+ZP= R DŽV/nfܾ-0(LnWAG-=~ׯ(N+5PEqٷGRbhw|o'oܛ|w )6y rylҮ;ˏ?mtW_lZi c~nI?߿km…_} on+}sVwIwjo.T֮usgƛ0GSԯ~|}1zs0^ $'Ѓ}Xŭpi%}"kLN ܂pIMs F{:0JR:G'P qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 *MbLb>bi+{'8vq'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qhLN J;38@@I@N@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $Nugrvqw8q"c%N3r96H@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 $N qH@'8 t>N[Wp)z^]{ow}z&Q3*c\\3q h?+lP#@ƥK.NOWvtu\NKWǡ4tuʤEWJBW]zptM4 ]ft()tu>tel6NDW x"bBW@kl\;]1Jgΐl DtZi~n]]QZ:C4+,tE!*zbVs+|z"`hb]7vb$tut#93]1`+,thOu֣Pz%gWHW!Fi"`)цv * gIWIG.S?a`<~ -?u:xFxuإk(3tR)|?i yy3sͥ*vwo~%7W׻q]oM+ޫ[A77ѼMƺ{s{~ջ\#~ws}U-o,G:b~hqj-~L+x͟6w?]zېcn,WU726B\R uz[}G}>Ǥt+&,6B4*xQƽI~y Y~.THCmeK9mmlJ)V Qxz*>cG>ۮil-$?YL =>"?wO繬^6}Q%hYQZ%VrɸDt ?Fib:BWV4( DW^_(Ny_;n:}zǡ4+SWBW]zme"bLCW4thuZ;]1J+tuteZODW 8i4 ]m8q(:Cn~&u4tp~Q tutE6&++QW6hD]#]{ͤX8 ]1S_<]=]1J'gWHW;5 ]NCW6F * 8aF,th_ eBWgDWZYmtIS53{x@ˇG{#^dh9* M8yF7D ڸFi3 ff+맡+FWQBjv2B9=9]88h9 mPt:^YG9s^;#ehbBWN 8tQ`QW WYњv; ]]YD^JWLOCW ,th];]1ΐȫDtQW;qbެePBWgHW[DtVij= ]1vrb6 ]!]y %3 ]1Z+2]#]LNÝ uat(+X@X`]S }9@vc$gAģV% )òL6_7HWѕsȁ/zUR_t(Wvu {CǨR> #'|GUҙŽꩴK'u*Ibr :r nw2-ü\BL=KGNK&i ŝmA3tjvtEI'!XЧ'N<zPJԲMtE{ l{`"Xg RBW -cm랮!]s[: |=.UW ЪJ<%=]=CJ;DWbJp8вUB)hOWϐƒwɺCtvLh)m;]%]=G:m!UK; &m{Ut(t%w`*BWVUB{g9ҕL1޾sJy ә}N1 ) >@tѐSA+Uz+ug+ŝqsZn7$ݠ~ GstKJp ]%Zj1=]}]?=;E>Fc]X>.}|'>hj8v]1EGfO~qp1 ]%DJs+mQǝWЮUBp*ԴHWB>sP_b:IJ@%[-Bs:^A3Pre,.=YϝۓWLF 9Epbg̨dޞ욳_C^61w˿DuUrvɿ/ò7p0ܩ'N~K$+AIy@$ۨS3A\_ʵBrmiHUhoy'hSiiQYa~1Z&OT2\RTUi!P,$WoT<_ (Y8|_pGJŏsP׳=_zJ慯>R?OJ.}n^ag1P_kBɽx=ڐʦó3f,j)>rg!1yG@9\Y#ޚ9~ڈ}e_8,BͲKv2tp1!D!e`XX1c2b=6 <í`*}b8I/w)ς 'i`()z|KPҢ`wZ(F C["0O>jE,7r΀Wy`dv41+06fεE9Թ3]v7 pcBmZ,EOt9 .z:[2jw0ȝSl(@<_^tA-Nu:v C3(?ΊһCsN{nԋ4n\ ̠dVjbw5ׅ /-$WJևtCnu?NߧWݳGp\xO7H|1 N?-0>׺Xx[5_-ڣ.d7֞ԁlo\0έFO6{O4~I  u8FFQ:f;)e1q^#A6oO6w65=77x5Yg4"z0:hybgtaWtIlG˫ olDKE Ar9[^ (2Șs6Sьs<83 ,+Β0=MK >$g7 o'O0W̚" hv  LFB ZlR'TXZWҰȅ;qHDn#R #z!Uxd!R&R/5eDDL ` Xy$RD6@@*ZQ%~ulM,}-vfFgࠝUtJ/mE&|1M-^epHk}$F"N mc륒%1jc,F{FxFH\qO =1<1(%). XTSi` n >Jˑ D&HuL4J XH>%1E4^JJ8tNJ7;s$m勉18abm1,yw~}]ZPb_=4R`G&5\a>ǟHyd'ׂJמ8I"pQ]``0C>*tO<t<=RO<90fXHCtzz ~wi__Q $sݸq9FgO&K) `Jx:k!`nmL@A^X;}MgiZC& d uQԴg73ِ\+niq;́Q&,$`ÉCMGNeaUQ4R3wN9o4~<3iKY3}ic@1q68霳W79_MF/w.~~}?ߕeq:0 §"IY]?LĞѤyJ[58ťϟu ԈJ.E`͉M[>_x7(6h^WQ4CL<țLf& G0WvKWfT Nܭa\b|4Pq{}Ȧ1Z 7Zgįίsf2Z! /jsfqt:cB-3]V|ﮅz>[/SYwJײg /{`6]lݟ. >0r1z[Md%aP -<6W.K\8Zw TEodl z-f?=s7Pphi HZ*t#\>H|Ѳ jhZg@<:wDpFI|GGkFRLR+z(hw% t}Gj]cCٍƍP6:8]]bvg | {'fsUj(-Ɠh*yeަVBdGOίo X-9Kcn,tERN1Ẻm"2wEb^X_###aגMڙRX&m!NP\ R(:=N8`1waʽ3ø)aa18$ȸ_^֦Z>7ۻ}zeBЏmXG/րѺɰ1Zo0zÈ'WL4n~ɮ_Wk wlrH+Kbg[lQhz&E\(wp)BSϘ8P=tH=‚SڋLGr[o%?ʙשVH@ >` `pihۣ|ݧ//IM0aߖZUqeX/Ȱַ),y -%ǤG["Yxקig77ݘsFš;xM'25nɢGLHGta@Zg6JQ {;??gPx0tYEZYC Vsu ىw9&r6ȹQjEw9lVJHY"xo $0tv 'h;YC,!G#4>zLLpݵx#/3 /c-&.:RsG3нcYF)㙉:I[7N[4N[2&D,F@YqPyEP#%(-L0Ԭٟ4XO-2C=7y)c<򲕕zOrRP/( nG:n v*0 HpV\ 6QCRz@.(q W*HB9U <5˽)pGm ާ㔲9`?:. 
9ڍ=spOi͝yڸQV>`\!!I1jj.s{IpsaZJɤ R(֢TRJQ[M*FfdH$$436g?36g4ƅi}4˅O}$6;,W2YS<-Nj/ df<~ q<8PiH Doד ɉ!:,Xo55*4CrLZprmR!`V0/2o.Djz)8N'b?{WFJ/<"/Ƣ=t<BaIʶz}#M%E*ʬ882b_v5TjR[RGQ:QɦrV\8U;Y$8&=Ȳt@b FgVh +!^.nC.M}ZԱ<\Glق{$i#~ao8qFe;g?(:6-_5N ;%i[.bC7L 2ѴI[߂zYkЉ.uD$BJ!`Ŷ imĘTp,X⚗t Hhc`dΤJ>X"DJ€ʀZH3AZ'A̅ƒ@OS61>3WYWΚc{1:|OY|7aVp~[1C??yFa>j)3LrŐy’ KmdI纕uk):Og3o}F>'5j[zu7crUv?Ke"\H&%Lۂuu6U`)qvρYq)V\7+Nq6U8[=3ƒYCiٌё%!UQRސ5i]U JmtRY{{D.>0p%z"o=W* rI sr,TΚB -4j7+ox:Ld5x(⊄G*V]VAJrkI}V2ǘL"CEWfBL!DH(ZO¤iNM7|t]`Y(Q2G0Q0XusCdZR0 br_ˏA8,3"~ˬk31縷^9A[vqL[03=WSSuIyn%*IFƌdK)\"0Q[Hdgb.\=#eMb|}ڣs^eyhg'HA uONqyRPjUzuŵF\TO?y}dvM`d2F5>|:xw2]WstՋ_-/iVdWjHg+ +I(oa;D+XVh|c{^[9`:UzWQHi1길qe`E(G_9M7S =D>l0~ _H?o~8x}OoiWHV`_9) zo~]iC+Yg θ+K~vݸ1; u1o_.?0L?ƱD湱]ģZ5 &ͿB&6?,刳GtՈg!V-B]%OWT"P_nTkF_e#F382e99J6p;|pV'E)2ȝLΆk;s{^͠P0-]}')h3 |(Wa&μqԉUsj4;G~FU7w*u@ye*ϝ.dd!"Hb@e#4Ҝ=Q".y [Iё& AWc,'Dk⑖30 Zd$xeWm:kԙW秃xi;@CṎc7>=M!L@8z❤jߘ3/z^ۗd8E?sNIY`EhRDJV ȞqK lI %c\XGϖ Ի*ȼ26Dn'۔3D6%01+ޓUg8ShO9ˍj_x6"=5lU<*)ӆGʑ;ǙqL` z#4#=Ƿo Zx;zw܁ w`)NbO} 'hm^NBg2Y_G //_L}{喗/~+eХ_|t s.>_V\qӝ=7[i;_'q&L]8!fQDxgB5v޲GSO⋿kt EIF}'e:3o}E^,_5;0vu:}ek?}[Wk>|6J`h8 #yߛqm޻jtQ!jlH/z1:8(i3/[O^sݐ4 צY73| 4816 -HZgZ{Zc1w:n> &'c95s'XAs]?ɲEߊ\UiǃZ΋)Y2y -f6qSVڣ_{V:fYB"tw78Z鏀M9IA5NpL?OJ4o"wr{,YӽpEE ȇSSW0ry.`r]2{s,ҧEO傠nؒӍZa-Q~ܑ(5zk֦.~!TRUVZ˴ml՜ CSRR7j[oնBCެ~M1tV* 8Xes[&I'Т29u.IR9>_|$G m~Λ][} ]|O{BAiPҖOHdIZb?G FlM%THTV(MԾg*;CoNmԊR7oyBZ}ιboY:KmoȾQm!})ŤPϸ.8jtN*X5(iZ-+[,ѲL?,뭚GX5?U#[5+߬Uc4,hFσTҬkh`e.f s BH*ŅG<& &2p!k,{i1N܂)q_&2',gRǮ<0-Q2de>65l ?M!Tx_j$hY(ODɓ6,dLفH9YI"= d rz/3{$JLRQ@qWf GˌrA89Nz"BL&kRLe&(aJh4b2ӱ^FIm&^GM)\o jP3iO>bE#I)K4<;.k Α>Wz[ SFȦg*3߼DZ hEߋ8$0\z΋7#BK\IcԳG=X6_\ wTMVѴa ?ni[-CHyBBldKl ybԻIrxuږ@jI6hP`}|_ ePD ȹ2hU"`cbsH`6bWđ1`BU{tώ]>*\{э]'\tRO~0}:;:Brю#vP{;5ru%A 2k4aRQpw)!-߭^}9޾x':?7mj"-&R;Yn'%"n;g?yQne#\ҷ%s)V"?{ƑBaku9u`bl&ؙZ˖V{Eb,Jevd_oUz꺈 /;qxP\AoW䯰_jFA.Wm]ENZMRm֛B>[W,;lk"Y.Vb豋-ӋӷK'{Hnʡ{r7/׬-hE\_|E 4+шBV-yS$B&Ox-'AA<:`k҆aGyM`r1@|Qd8T-]ؙO;__M V纭_筇yc!b,+4/Tݯ8ԿepstӉ-~V&_f >G[6v[46hZlc0ly*xjL֮QJJ!QUg$kD\6\W:ɛ:;qx!|zq6ptvf cbcn_sVs˷jysSR_.. 7 -zgG?_4d)g}u~q8叀O~/?~zjoF>NeXTQ/eVh(RhÆz64RY>ut;za$*p &]5tyk]48E&Mp u`Òv:84lhZg?Y/opSP7ggis'x-ޡYfC3> 8{v%;/gߏ#\`AJ?v\J&\ Zpł}? \A-UAuJkeW,nprWֻU;qE; X&daTZ1yW+=\\zW;\_{e@Ft+jg j}0*~uxW4u+K\ >WMWU7קg\Z >K:jd]c|6Euv~ΰ_~ nGOYB") #x*/\dvߖ2o2w9&ni _<}#Qńy!B.Cy.u1eeˏ`Qpai؟fϓ_u\Xda" Q 0'F=7f7Of׋K %#gJhw"±SA ,yErM7XcX:odԉ̂WkD7#X=X^=,z{,[[ vrV~qnv*X{P#l;TGZP}͂x6po.Ջ?_>nW'|6S 6?|U=g9̨Uz'J\"^ gϏQ;`DҞ jqWe _ς{7@#|Ut j. 
[]=9-0ޖ=f~ P9VmX Zy߫ME@|e ⺸^ԲZ?2Yۮ{zfRt]+'ޮ kUiRxe|xUĨ~:ʟIy~5zm?WhC2 $sT?"]GK0t'CAeu Y51i?x7UI]-Q.kelgPƊJ7~#dMhF9'9*~NXihw4fF["i!'iK[#UwS)owtVD*m&I[5 t~zgdXc۝ :O :5N1Z5yxM^tKAWڮ89 (R:ӷLr&_V}:^p~mJ ڔWf@h~4μO?0CΥ ڝlc6Lj&=UñtԻCG={z4o&Rj}w%Q\>+~=C7kO^3+zl&әjr}#^׿s=L6T=uѲHzMƑTϪJR_㚗NXMv[#)Lj0)H )1Z<¤l# j4-.&eK"rEs`k)Nj7 VY֑]&/VgZȑ"k dq`9A7w%dڝ%^_=EI3ӒG짤kz*փ]@fntM@WVtJʂrE4ϾRp*.؋3< v<#fu\9uc]}g),*EE=(x;J::h9t^ۉvlSу@svjDKb@7nK b&|<<@ف/@qIT!81Ӭ|5x>!|@99IRv>,jRz Y'25zT-ʕ& 4.*$ ,~>]hڈ51G.9=-2+=,]E|8?]AY>MxKfLӜԀbpByN9.HC5n$#|qOi{}.ƝsJ=.8NN\2PXoNŎw+&ێ\v3g$PeKs=x$ Jvڇq_I=ԡN\2 }@C }R:!7:po㗘}LVr7=]WtP ?uணw=9GY ͬ8uԅ҅e .9&ek7)ilUA'o&lyPzX9Y~ ~-?Nf[ˇp}Bfj:벚-:x%1N0)נ3" aE,]I>>+v"N%I{5*y#%.M)aT* ̣CNhᤲjUH[4:(u.)S+hdg[W 7 dw$M m:*M%մ4tAu¥):Ն TjmVK8V) {UZ&Ql̔F <_lof4J 6$U1ZgEH@>蔠aA;!"VMh@#h.ЄTLJPDe 2SEF煕d5x<:fD+q'Z0C!%Wgg&3ج?t9I˲>^9h3lRMwU [ +kAťՌ+.8ήO1[iJ0 w \kƆԢh-[Ǫr Fl%&=8@  5Ls=e'9`Ζ!-wz_E@~ yޅnmi6-ܒ.tww$TP0avq A-d'Tz`C7·u}BnI2&CZ%oˏt#X G8)U3=_G:ze~+'y`q!)fy^x=xa8 JѬ:EiL2A2:>+QwLSРt =GmzbS.gIX[t^]š2L)`T c; s>Cg$mĆsɜN|ږENHP3~ƚ@9d*fZ1ښ2 RS0a'5R07LL)7 BI kn_һ?Oc~R퇎 z5Y}}*۸Ⱦ|iѷ=w[ZE5[bql3LUN2{p;lB&g{ сUhI*&wa,JmFd&-JaDWͫҭwlG>ep$3})U t΍S!pB=3έw"GJxW뵽[- %֌R:S 7 }! oxJ\Q#FP'qI33]i3tJF 䠸/%6NII 7EIҀ`RR䔔գ3RR9a']ing #^v|$\Alsi>7)|RAw^]ŕf?x.yĤ~wäF侓GQSm [_g_O|h@Q@OI'E8cz)!htů> ogb(LJOqhz̪jh*B!i  g{GHܽGqLf5sT2|82g-n!{룁LLDXՀkzF 6e?|07%a\Sf,:uݹy' 4IIL_/F=S;869 IӻzelޤS5GpzҌw\m_6:w?qԺ;+ J{(ts4qNˣSz& /b..bKFdp:X"cBϴaJe$80p51 ZiCBP 5 ˹.G>Ϟ%m[3Zܔ'{QB#TW5e.Fj=pk kĄxҹ-^ sp*¸R5M̊hyun(Y4%8ri 5 )%(FueS B9y[d h$Y6 oM!zCq;RdbkZ<1LHTDgÉxtӅў b%uʩ¨X ċ\ mƨsƐw5N(;9E!3ZRj =I=<P/qG c]$"}L"dw)bUw܏7ihWG[pg-WW_f6,jEFRBUӿqO0~)Mr>ZO$Oe1kO*:7^ :AM9Pѓ=4obɯiڼGnO(r9QS7Q,(i+rO%dCx :Nk54}/לO9G3P2Ey25^Z1I0IFFFFVVT1T记%atJA㱒\h]FlC!߼z'{yE||*RyY?7e0AS2\ד$k@k }o'<A 5r\&[`9 w>{mf?o߿$l?'Rq$Zœ $< ?ċk/4 _[05rL%=ityg['O -OB}u%fXO4RPtV&D?d?,ismw}akΰ9a^6>o"Mb1pwї"^ z`Q9=+(N# v 'vnGmǨ#-Eƣ=oԉ-udˈB^[RC+rێ9G]P;Pqٍ+ct["oe18JOЮ(Nl+NqS׶vTZ8y%{h1^F[lh(V(nB޵m,"C{ SE@>$nQANdIdΒP%-ŋBq cJ@U6vMZhpx&(,|vlJgy\J, 1u`~t=| L1tQ_^N(X%O!qQqj\dFI[pFZmI{E ahF+%`"uBQׂXc,uC(d&Vx9X%O/؛}S\#r\]}}㙩nbм$yۻy0wA/ܕ05۹gv4q\WS'd8 'uY2BjIe$?3#\;xkJ*cFv| HOJx3$NU%"Z;{QT01cy6&mX3VUi)r1_\XT&߇ n&+bwmQʝD\Zq5VlHƾbRXFY/Wn}L%"$Kpm=!-_'O-Km`,OIJ> NV|=]?&7ZG[3u 3gvm$Hڵ봤MGzJ2"A)׏"H(ՠʣԵ<]%"EBzom3JqAGEс:I:)JUy?2_a<&mKwr[|aXh]wBؔj9 \m9Ou8^dYyOH2c,lcgp!W0CD[Sr씢NUm&rLlhO=OA9ڿLsLL:j㎒Z߆norYHT_3KGat?6 v E͟>c@Z6l4nI'.Op  [wUP!4{c FRVMPDwEP>`Okr[`vXoEO1Qbd(F'Z+)%Kfj]f T#"yūP~B9'Mż}cН";{2rPl!0/oі_:iBU;rF};C1y P;EB>=a\I(,yɺTf(En3cY? 
!QΥঢ4Osi5f>.kԆ1%4:Uc)X4AR):ܴ[ @a8yP*,O 2YҸyДהl{#Uqxw` =.9oy1`y4"AIL}R5ScvsZv6gζ6܊3io57.3ٱ6砟 $-):{$Lo_,`3 KGo -mMgw`w9L"|aj}LAx7<:Awdڌc X/S+Y9,3ֻ̬XwY!"q:hWcd+2fd=|acf9aIr݊;1; я%߅K?{9Sd̷Пv ]hn﯎zݻ7x|gĦy@/c;>cj-m+ku劲RUbq#W׎f6sdD5jDq\vW߆\¦Q bdkj6p #UZ_o+ڡˠƑ66n&v,])UE Z gAc_U2'}̩3JU)/HE{qiyWi%<}H[+Ͳ M66ltkD3&!,B<5Bt jC%4/"R`lZ@Z+|oY֌)NaHi 85e# q=(/yHQ Ck1]oqs'^50qNVo~B+e ?]!dW`P" zYm2l k,Yڔ`MJ=9gαw86pI"GP:HnA M?ZBe9&p.ADx&x"iA,L*ѩExYeTfO(BOe>(H,Baл.B!Ĥ+ R 6msaN(/by1'ˋ5tJ{lٜUU{ċeP;b渊 ǕFSG"R> LȂUR6wL2^e+EW ocނ>3tJSsls,X$71F1~'Kcpa=:7ڰ0ŝ@Azza\W±qEyg?|>22PDž {`Xϙ0;v;h;a;`tHcߺ&$+);V=xrݳG׹{f[yo໽OpgOsz0/> 7_OqFęaDŽ&9}6<6?kgoţнro/Lo݇p>Jž5ʬ){lhNmݔ< ]/uf<}xПSRI^w9\ _'q ûIoK>?7%>A!ngFc?= ql9^ Ɵ~^u{ɻwۏG.wT8ѰƐcC$hu~X;tn;邶!"E%~  ~02NO B>$9zG$e*>{eS(b8 F˟wWIp /SqO2::ydOMָuvK0Nd%^4\j=g6q ~.V>)IػDDZT[DZ#KA/qQd: '"1gp]4$,*<-q,Osmr[L$B;.PPs_ JE؋>䏑JVtIO􍙁kvinśrӃo?*i㥭ͮ9fcUjc563 qR-j+?@ˠ#d9VWδnTI֮2h()ktd@KDI,ڂs2RQ:Rp^cXj7ֵյl2Jmρ(`%xByǕ :E"QzW~l)0VnZKEyO\+M2U6RH u⩲e_*X ?]c87q<x[hdR"\}&> PB##TR#qa |NG/Wƈ$oy}EFś ZC܌~OsZv% 74hojz]ٛf3m8>:deyhUR N]:WV4] N\Dz_Bה[6~m–i8lv-~l m!UZ257 »EKH׋׿ 7}}XG%/yjMPYAHH=Gdaz1,H+ڠw'T,킶$PQXNN2(u#Yg Sґq,~Fm6R[6Ϭ\PDwT7>hˆ1Nҝyy;򺠨RUd>+,TGm{o1֌'@sJ`(C"ݭnz]5IsJg HVUXJg]J\y< PJU\JAիS}'աOw>DgPV2fh +PMk&`X8dǗDWPdwCV :098!Z1^b_ɋ=PJޅ IKX: [$JWpmE\ȑ IfN"EJMae͔| )_eN"yL Gk \;^C(Fft3)7i>8R]H$wtӜVVO+$&Њ`&G_ERO<9V1`'"aJ{H :XRY sAŘuXuE!S Dr#Ca6N=N;0d=~x_Jэ&/4N%'T-b?0 ^JKR޲54V`3 ۥiT nB%ۑ_;!.bxϪMd˥Mipɟh)* >G߇fX"8QfYpY)HK b4yn kkN6T,ҷ"T* Z ?>`!Mw%S d+NeX֙bE^rT$CUZF(|- f55lȜ[lA:`Zs\' 09=@{Q6=c]{)l݊Wu{fkVĬnf`ݛʞZr8VqV ("%f'Vp%RvIut۠ߑPZWLt;"YHյڸ;\vweW$h8h46zeEmʹ*8S491(j#U1Z.!kNQXm_&`NSnS/ch[?Ƽ`w;o[C؟.͟wðZ67dǽ:^83gGe]QY樬QY;Q8r{D!"07IA_] dXdC ,`};|u$',_q$#MO̞i(0lDbwOb,C %0WH|r0)1  _/N'S?կ~}b@{&7On~,kǓ<;+uhX?y1=f"tK_2+| ]ڱSPdL)zKԍ29ݲWQ+C+X/֬߾_~ޱܾ?Ozu뮳ӿ6.?]߼jƾ8h4@>9M$R# Dިف|̫Sų7HXA$f]\\p- hXp&mon$ og_U)c|O\^y{껋¶۟_`x֟yUQ>?2zu}s'o޿y#dV(bg7?../J{y)[6PO}gj-`{z|8䕞2Щ6p.T?z8{n뙁ɸdHz2a閳X1McNZ+6u1_Yu%m1PAnuA--(T,Bb8Ԑbj RSd@l9!kD=5BZhxV '٨/opȳp_zg?=2ѝ\|.j 8\ޫy 3ζ-ACz71%JlE4Dek1I2%+ Gc]#U֒m!Cz]W:_F:lUcΙ6rB%LelAӠ)/z(LHאȭ7-Gqx/Uc:HflXC(fa&+08RFYĺLJkb6itZ mxTICԕ +`'f6Jke&6cMp8YwkР>]If^XKSUljRB9sO@! 
C*JT@CVRsJBS87Io %" '0Dp!'tf*5x&T@4$KA"LC5^ʭd TǛ<7g7rM$( eyCg|L4AJVE.ʏdyv`Mԑj69hfYȂ 6'y0"`#B10|:r"z30xA.!p&6羒eB+#9ZA|,ݤÊmwa`FxF;lJFM q>P)d,"&3SEtL!U-~ UZi.QzӒd\}g !D!50%~qe*4DC׎[AiF-$]NVMhx(M^ ifɏפ{^jZ"V gkuF>LB{0I[Ov87L>x2˷{,;б}N<~^WN&m#_+ {iIk=@^rL*+|\~۟KԹùs-}?;?qzp~͛/.oӿܾˋ{i9ALSE8<*֋ s${TfS>u] r{ٻw'$}U~Hp&J HHa >e \mQ^(rֹhYGke5j^KLmu Cř=>5vZs'Pq&':QC̞%5,Zӕ}书&ugkcM?r\sgHF㸺?Ԥ$jy"~ad7LQmiYdZIтk;"-N][L w`Fx;.Ew槼f׈q'A_ΰWp`kxWp wnU;.'( WGx^%_:>PGgjJ6:PdCzkK SelhXoXt`Xm #hsb='LJ*@E`23$fPRD]H)#z 9Eb66g Oa(H \THTMN$ -*jδFcL̛cLh.l}2hp K-gHaa VY\9+:6J`qU l% y FG`Aө:>$rShA)_$dNز %jxNĹX'݀p0Ee4k0zWm^_X3?Co1m9l ,#7╘rM`Ej93(A`YH(Z9Gx_p[0;kx-A&^\{狕_RgM9pvu}D&~~p)z~m)o_hћMyqr~"~\ZU>Rmӷ"Hԗe6ZDc [AD y_1:[5,B~[g8Pf|WJHe()8ede6]`R&VoJ0Nۻ<ҿ?͊7vg7Y>Oxu?^RwTE10]ذgaB/ Tq5Og+[qϖ`\w(Ah*-u[C܎Ș63hXhߊpbE~ `jXV8Z+@I+1HH~5Ijle j9p؃;A`cvm9`)0İ+a+gF|M90ix/8'3ՆdPrE'Bm~iNItivNW7J_v%FYT'?>˟MZ*m_>:wv|hr5i|fb1ZɳJ-^gj/|O{}/(΅}SM6#2 H4Qư[lKVJ޼vq7qh  dSh$y.dPcz5ݪkmn d]xl;J(@ 2ZY%P UXF)Hd)$$LZ 4LQ|1_73c_prdW}e\J Hf՜XCaFd̀|7$T!XKdn.j:]wkxݛRq3El!}]Ju?ayBO죤1&&ϼW,od0bmS)<>[A+~j2zmR{y5z9a(ƽYxjNh&mPR"˅>^f $}t+6"eBȷ&"CYLf^) m %@]a;W|8ķ/춇+)*$KS2s}^*㵥:?x>o~B>]˟NgATѫ![7f]>ȭGh\!L뭓5w8lPFVH7#O  !vrWY['Pg-Oh(< !bK D|Ch?p4;-BeQ~r;{I%-&5q;ÙFI 8}T"C|aF{ĐC?;G4N;!U7GW7DƜ{8(W787fǔr/=v^_@En6}tA9 qHZk1&19HƄCL(D:dv`'ɜ|EYc b XȒ6(]0:5|LATBThJ*g)EJ7^ҽ hⓃj&K5TAn3 U""T:$`LR}}A >-UC}D;Z}ULx;K%7w]j[ 2c-nBjT ]Ѱ,b^rFs 1۲FuT?q:L@*[$!=8tNmuo4 }Јާq>oz~la=[,[3/`@UKϝ׾ *dip{{so)mr1Q/cH|\eǵ"NݹPԘuv_O.L5W^S1FwJ<>̟Hͷ p={ANF@y5Db'`B \4eK)h=QEe1E&1Ʃ4NB-e'^RJL8'sh)$1dhE82îÝM=_Blc9C|/vo|ɩfk/9xn]^ 29Dm<֤{>|[I.ρu\sLH9!<4op[#dhR!bI '1{ѧDi8JQhRTf* + t~I]0/czi1 $\p`!O>k_z5CT:;&2:sx1)`+љu,Y]<_  u =-%j+Yk/+ȷop%> 9%u53z`a?Fw?|w5rRM>ښ?}}!I0miBlD-2vSuWrJM@ (LAhLKI @\c RX^gofƾ REE=`t\.|%bsۋ+?ȾuOyۜA95N2)N7ǻ$fVq*HM81]E-9g!1!m.$,d~_<-",.)%&= * &MBsC$ '|{r Qr^h8b+iR.*#ۓEJS@hkt3.Z&_&͂}{vE?Zr ;"$@8ڕ4<0Eu)de'TXMFI-up ']Pж% kћ=JU" $ P$02SN_t3frNDמj[pѫBZϩd_-ii}k}6-'^}W-8[& newΑUR#WA=Jvc۔M봝*西Ũ`iB=ut"ap:T d##  iL J9 O9ڂuO"?Hr"!}LX~8kҀ$Dp#֧_ ,rȟ_j$0;kkH.%(H/_T 2${5D&O)&Rg"Bϭ2_#Lg\tGXy HeiM>Mn^R1ƃ5yG,=R塶 ot|p@a4=XwTt?dP z[,"RӍNs(f+;9wLppL/jvyZ~)8Tqn^kw ɐ.G?NrTq9z?4>>S~ɝ{F2 PeEXNs9YM4[2_9g4_j\Lnv$賓:P#YQX})!9m=. )bRL)8"DQbs[dXX |X~g+~o3~*0~7jawQ>3濖1Gk[xy=ƃ=9$pSڪBXvsFUɜ|c}]߰Љk6Mgfs⭽wsrz'cۜ4r$+cY-A-9ܲ֏W*m֕JbnsdqQCJADsבK$µ(1OO<Ic$>FR1JO|X]{O4zOL+Sv̽2Be@2 4ܪ R%7@0` bk%RN::#iDi%F6O2,QvגaV[ln'؄h| d'i&M÷OOO)<P\ L tYh5'PQĥ& %F3b)OQ+EZR@@@yEo+t4Vmem m2'B`!Y(*-`2E(TSBRΰԔ9r DŽk 5[< @XG?nݻx,=i9_-y ,zf+*H-䠢A`p= LpO'zg>3s9"L1#S05 M3$YI31Zs-MMi&L 0pY DOӁ%z~Dr,sQdK@ FR܄,D171DNgښ6_a(*=heIʼn\TR2H +*$xEԉ oz{z{|a"F! "x֟Bs@s9$0V 6\JcXyBOCP,0'A -s` Am(n"%j,G&$*U~ɸ0aD K* U[iR' Z̓8BId 0FFVIPxJ`aP! 
XWB>ٽ3ɵ€h`꫈2F @M F'A|_>uLq T]w Z!=UZ3!ь/B-)Њ40Fh\Pڪ/a 摈|U{4_cLJ{~xg@.0 ZYV$zb~)Peȫߏr tٿ11ADn竿 Z.Ϗ'4ogAx} 1ܐBgz(l8m%jð^ݺ9Z@G+ 7`#(az_Қ<5pnqf`\q(`6L( /8 pńL 'pU$27_R sx;rZceR1]xW#i<\p!m9~ƮR2*P`DB5QHqͥinԪΥ)0N&s0w 77j5i|_"́]Bk$\jA#*#P>%Xcd{gdxٛ~  ?3_fIL|ӟ.{ӏ- ^w~=pgߓzFm׻71h4>:b8M4 6io|?=ػNI__}30qY,Ef(9"oXes-DNf~sz}͑>` ;N_}9o<.O܉iJi1xmf.iw)O; y[csәSNʢ3T؃ qHT~VxQ*hM.,7 vi&7a_|LabطA?:RM@KUz{ wEYKu_~'I+p S ^g.Ɩ8lx$@¯hg2 s$&g[^ 㖐'z=W/H&[@wNf4_~~ #ɑ߳OpՑ(.)w}ˈa{1m;g 9ݙ(Df풎dԬjbGt_Hj,ح۪\;ti6G W`I̡:-DK9nş1|hQQ2:!xX6@_5*H+j?W/OaV'|uǀy(PۏQƩk3[l|}ZC# zAa ΨeWSv7?a˜xlWKli>p iŝ/Ag ܀.?kV!SRvhis!hj0Z@L{j0JjeϸL{%+-%fTe@VCms@7쾡Yl$͐m/ E"?d`g>!qhʌE>M6L{q[X?hA2[f%enZ(uํz 殹=~tmGiGT)@|`/Hz da(/P*JAnӥ]Ley"A6`bḇrc9,)u4slq,ԉ=!;S_$f4< ЪʎV)-J Q_0ݍ<֏Xa2 :q0=ќ&2`(TRR!ܩZaF=p/kZpdb"vdG|' *raOj/+WFxno|ty>3JG$nx.Ff, ~xL]{\!Yh-׸sc&&|<= тkݐ"sbo; ̆joI'_}}Iӭhut޷Цv@ՓmԒB]6/h{pU [9FTAg֫~ҕwʊ@:IwVi)bSpr% 5ܱ߳Y^+O$Zji Q =OӶa$9 /غ[z/^Q_*2H=;x@cC ,G.&AZkZXo{ufphh\{uL$<IJ׏ , i$~eV,ַI!?ɹf"WrE$)QOY·n;T b jBÄ\&sMP5kB=ׄz \pPt! b\j DԱx. E, ޟxNnfݺ5Xro)%S;!wN=;..N~v3SͭB*TgϺU-ěO?hy}kZr Vtqxc;5/ƨ/Rp9w!A%÷J ,B(}"kC:J0\Pq!zQW=|E| yO=bɍaO\pV>ցX~_4M?D-91[pS_$Lrg[/gɁwK1kEio4󮓱Lzf`xyIS/߾o߽=hiطK3n[c.˴V?N^~?KML .>!-DίSϟPkO6~ŷþxE|v jKi0HVcmJaxzKjdj"Qs$ s T@"HJDB˗{GK^Ӈ^Bg%hq:̧g 艽hRgAi\Z=VBfyK:̦ޛiˋҽ RG޵8r"Kn N^l &xnhֻOZH-jFaJUbɊ[JM&,`n/f? .I&j/朮C!|P$F!*mMPH]J7;4aʺC7`:4EפE)*djm in$}2Z  1bgS}B2z~AͲ?T'O#F4&}HӬ#mG˫?l3{ɵ\V^a#ԼU5IAU=ûڃ&"dܓxi$_L'_̖抑(%$ahb`8f1m0 x^} @ M?gEQKDT=:^GDŽ׻I&'F1QZK&L$d{aEmxHV3 {u*e-)H'ש:Ӗ8I`g%ON"~bI -N>ro1+=~/p s`k0'~fE%7 n G}ٷ aOkŬ(A.<3LՌ$TNd>a>i>S|+O_щْbOU.XatZ:}?nP"RO90?^NU*mȳJ5auLԛ/ ⯧ c72@FgbT  ?%F%Wς(&j67>+l< V@HZ!LtZijM" ɟ< DbD0ʳKSl4XiOFN WDI,(lj65.5`'aR"h%IS5SDUY*H1NSBXvdH0R3BȐA ^Wo0Cb.=|=3Ne7(ec"-Lg;p'e~T}6*޿9[9;'iSOf6L+q粀[S0$wGMhޏ0,hӋn4 S_KZC*B*tzNfb ;"!C*6-U OkS QiG[M*u8f{LoٗlHJEp7db AL{u'.wBV,qҍPHP!l|h/3!bL1sxVvCΖgP9]ݎ!٠kQSR9] B/H@}?p^c'AQ$Ɇ~9(x{ ؄nz5s6Ņ#p677gGg{71w8_<5bFw& +J&Qkא)SJ،V-A-G\VA?׭ii` !5>j R,:n5WZ<;;FM99+>ܹBkmCf&<^ߞΞ(?!Q*1!ek/ 5"\4}}nE3m!NDq>5ի8K꿧ǫV#S#n+2]= X!X{LK'8aHG3S< ?^%˞ޣbRo^,$HDz(_,Ȗg Q[!DiV3xLD(F5%B 4 :i0U^*jM@N@!"G-5&u>T(,sB4rp=g"U*w5]F<+/ަ_. .fkyﰼp{} X…s(~ߐDD =yny)ڷS?MV.AD$'VAߞ:5}-(W}sd\[o(, Wj ף7ؐ9f FYz}MW(SH ^:Q}茗SKS}ch*1A fMпz bڰby/2h4dE2דԯX$>ٛm&^<.p̎ \f.wHJO]5YRLVAfZo&\pc I 8'oaq{|JK2>bS0b~8*Pͻ#d͟fgfWy b]82c/82c/qlQ?$1C! qF$1\)MbP,j 3ޥ12GGҏ׏o(f9' ښ%0Ma\+<) ƘL ,TV;ct"5PzGe!X >O<U2;G@S̤֒:i4:*zX!20u1X!%<"8K2DjO5jN۴哸γ̻/,_h./<3}^;:&2I`-|n*c"ce (ۦdjD1b8z}(\XpI'*xn=[  V\kƍhNzjh 4 kmK40k BzCkLU |8F"$c,9Ƥ59lH9XB$qNz;Dz(% tP!%@ a&aC*W% eۯ;;mGX]3z|Mރs:I.k>̇:H5E=F[Ǎa$SǽFeZpe`+}ȇ,Z 1!JcӪQNUuҥFY(]ST=һBm=n khwSl݈\, GK ^Ȯ0R.FRr$բZFҳõbeS2O˴b9E9V[gy).o5he,\Y{V8T ! 
^=lBu4+:oOe4A oV`FkG>OVmp1zXء )-uH57"~ jj\̻CSYT[H|+4 g7\a+7F"{K7ћ黻o6H|8?4ɷ[ wWѷ.>zzXalGΦ.=MMt+sY.F/RzHq-e+un'VJ\;/ |}zm^W"C*"cvh$PY('vf 9D ;s6[^;#Z *&c>#1ftϙsދA^rnGw.Ϛ&`fWg@д6}dI{I9&JHeٖRW?pz㧼G8?<[fja\lgr+4Kn{޵LZ 76e J2+&*f&io??QYp !9%ݬet5_ nķm4- qeI_̱ $ #>{`Gp/y{zۍUR1J-+>C }*w^ߘX5f.~v~ި:-N7f~NbuB۟߇iL&Oп i(w|2X ʒgr=o{֒t ]P בrԯaDp_#_RWUeE2.u33j&tH=5,sJoa*%԰JiKVﴛwڹAGw.secpI>鼉/mgPyv&N+> g 8I ZE)1`krfW\r5Pc}j^w Լq)J[b?y 2MTggIA'7M  yKӌewrWeUH⒆~U30ůI>|<$3 {1Hf5Q^obajywӛ_G'%Rn[jT/ż `?ْLغxg&ߺuWO%5p{&wT؁)?#0W6eP )& ajbO`>{?g:y503S\v)17W_PSR|`K vs*Vn[+]f ٚ܅3ʹ: \=jVlA=C{)V`s]BT@ԃ*d L D{@ ;<KsAQ[}ҕ,o^Nuj7"Dfs}`*Fr٭8ϳ/C;'D]?}|;L>vS.s4I "VXՌ92?ڡ /3A8ݡ/ KeM8 } \QF^زw*0 !uSX}YM "]c;-> QKKuR|Q w^Т8%7W:_db;m~yc^{7-o~ӀBHl_=1* K9גŅ\Hїr}CY&}1D5xtUGiuJ*H:1XMHZ 쎃`&n}z; ~01?23G2cF% T(yed?>i&\p bȿz~oI|ȫ`?Sӳ5݀BLy2b: EdI/7>K|%^`D" dj*Zhcpn9Χv6KvZa8-znv& b,(@ 83>~NJxh뀉gY})"bR{=3~Ϟ/>Ov^ 狱&d@z&o$Ey("d&0yCk")̼[ 0e0WsS&\vP M_ }D (w#[##!܅#(^E1ľ~: {^oӑ1tA4IR:]'@ N.]ޅ%OX 9aVP 5 K4fGduZ9D*{G!A0kBC6}rU} r r R;nYy_%SujNKe\ TJO[ŘJf$YXf"f*x+ftwCoVkJ9/i6l.k)hy&y@r;v:_wn`y0r:+_ . 鶛 LqgS++LW0fu1 ap )p1"N*.?+΂th!dFOA!B 媿J뎻Sl76Z"g "(wQ#%vѶ&R,F fC8 aBmG`v<כbËEg֖},7ft^D#ܯ>yCnt*1o;wa9Xwo'a|֌ӗ2[/AUc>1V00!Cq;bФPX A ӳ_ w5 ʛ[H7zY ^0]]o:?@Mduo?ϬAmg{ӿ'w(&24G+FV>yu>9-y"Rք9ٸ}RQwy >Q%iF6->nKq$ |:`6)&{jx51[T+^>k>tC?Kyg;-曅η~ñ]*Zp\PkA+pŪovZ[j 4cكTJ/By*]ZddghVg/>:!Ѥ+c @,FRj) :4D Ȁ+Z@VqK=NjB@#F!Uih;D8( \0Qd` ѫ)R{ iS 9Y8N*Goj"oG}3 z:7XcYntγg{o_E]~~E^=0oWz>nx3DDpIt^u;7LBvޤ: :]o&.6pӻOm%;7HGd${!=k^B (YnIM6YyÆM[$)L _[xk3*ˤpegTaђ"@ZdehNhk\&C# /zP#*8OQYB08C|9B3|/R,X^K-:i*5D*(0A#]-V4'A:*r@(y-U TpZTw ݬڌRPQNzA] Ee&W Dol&4 L4Ӱ6DcJmϼT㠠mJtIX=۹!*7#F+h*g3x ox|2}i$0* Ebu$icGuvX%6RL1=?|?GsvQIyjvK%NCw-q1%9L!Ğ;'o6Hw`$w5X"b߉yס%gC,Ӷ9qTm"6vʐ|ł'-^$]ѻxqo$f̈́m#+hRJұnDxcL%rM22a4) DAhe7a"q>=d>խb1j-QNi|[y*{z.%_^ qr.hh=]WW ֟ .o3t5X C* ( 5GP[%vbVCyp`u66i^7pEDTglvѷY~9L` zR:7.' Uℊ8` , 絒i) k)4hO)&BzlT*rmUETÂ\.`AGzaWb1).T8[hUcyi2ûwM!~C&;b<FK3wbE1ͽa* 4G/ME=FK VGIcF\K֧ȖydۥcQdYG>ȱK;obT\octxi?}g^{,nŻʫiX հ}%TGdqAi<0;!- H3@,N'c A8<֑ ɔ*$1iMC}$wH,Gic~A/P{P|QZMD;SpFSu(߇Z$NO,e3q&?e E .WkeG׋7,S?k=ij_T}\Bo—+u0v'0 R#y6ܖ)7}Nq3E6gDYTY}YsF+U%֍H̻`ݪb2t>u;^FԩYA֭ʇh1|]ˆu?^Q~/ Z<*7OZI 6vR};lG?1 ,#xJb"1CO["M~; ֑s;3PHd*JN 2[D<<@lYoprvHB9>G)0~aj9fϯ9> ^lޏGhϋY8ޠ( ~'S`M҇271p371p<,O"4˔1R9 k2q{KEa!V錇g|3ߠ?SP3N+z[d&Źg~j*?a+(ADbǻ#dBJ欮9*@~a~j:=0*D>| Ԙ7.^ ŨAyFǏmّJӕR6i-T2R#kEeMxBҚ[q;9-!g?pLD?\rVp|C*$*kKEzkcWf@ *T6m̑^_-j\".e]eLB$W-bєm'z$rҾ}LG ͅOJ/u.vD.BǣLKz@F$lyƉ ]":8h@NPR}X2^s\} D~$\/9ѩ奮:Ϙ" *GR4X:nu.B6|Yc J hrZ).0>9+HRpLYl4L0˃46 auA`p'UP0`Mo7ӱ=2%WO7Q()x{ &[?epG?x闷'Ӆ>< q/z3ΝL\-pG dzu#\~wgL|1sM8ݔC $?|js 섽O=XtS3.V4o)޹ߛrvD*!͈j -P 7ВNCWOSnZڅ. /ތߍ~U֋#80G|v"U.N̐́NOB\>=+Ko~?#\VE$X٪[WfԹDV¢/G\Y C-'GOLjnZ9??e!a4 Gn#;e`3 8W8ky`DPeT7 #D(vR#pQ AT}8B0؀H_;%erc ZbmrRU"Z2| qD@`ʔOE.Xc.R ̕({T -EuSQee+2ay;͖0Mvlc{ ~[,I ~~R,ڦ7WW0{k);Ap[y.X`eϴ8 ElrbEԙ0Xaܨ Z UF +٥|deN%F)Y w0m1L88Y, 2B,jSV7%Ĉb.[:88ޥtF )J)rҵ@ m`Kcҵ 7SFW}TF#L}YSMw]PB|B]ٴZqj:; JIpcIWZ`ޕy3NER j˴Vq˳oF]C_~Qn5ǯ (֬[^ZØA&k LX,< <·5ϙ\IsY;oBt՚\?YWqk RĽ1,Iffx]tY)I:9[2zP۳9O#C;qNHP L3rT ~aGo,((z׃!Zr:G>RX8omހ5$l8?BE(ւu&>ˮbn0w>IAxJ4{]J*߄. 
Pp}x3s'ф籹aƟ/3y"/q>/bb*x(՝$ uOgF 6ޡN$/ l {Q3̮̋b&R$jܹfHv8z-S#VTTp,I#+VQt)q=Y:wm·VdۏY3u/IǦc qxU܍ڞ0dn=8UOl>]2ؽ`tG|",mM^~'ܿADdg."__~~ h<||z}G\ 0Ϳݻ&Ō*`C.zf4phPqW>FDeϐq0wŎy_7KY}z#9ׄ+jԗ]R`k0$.ڷqvUE?xB%@#~)Ax:X>Λêҙ뛧)j=e԰zX;OaCCOBpW.[|}blU_Al1Iy]^<n|O/ Lo0WLjb%TmZ7 ¹ މ R͖,` _>m\=H-=%, Nw$hk,_1IM`}f@C͟dr5W^p{ń3W:|$FRkfos cSP!$Q Ƀ"J4DpuH<- _c>Ol;~rs-$6yLbq?s=?i ֑'m7w0>MCCRc1ʱ%8'NrByȝ5V`O,&kOf(J8 AYDG0p QJj{n/^s{ 03t]Xd!_߾Xr%J¢*i EMWWw9$1ti0Nw1E@/;fm1ppX$\W]Ĕfwހ.hoGYM/4ȼmoXxГ$;wIƒ^J=SɌlTclmʛPbN\K7\tS.>?ںP˵*ĄizB)ǞSpM$3Nnv.FRXvi*hڞje4-!$A]!(#.%e!0HܰtÚ!KWĔ1R'CFkq0\uTĹɮ+.[-lV}ˑ`T 6 *jgn9pPu4,Sx ֗~1%(mbXXm :u.)x`:X$ \i60ǵ5y5DY4qQΩI߿{]Ej͎)N!0 b(}b>hAASt U/ a So*O*@MzhA3]+9 aXXx]ȉL#pPU. oKqo@[i"@PKfsrIV Hйg`,:3ڧI.1h '1]iP/.V*[jYof&GWs3??.(KA ֏.Ċ4H뾯LeR5_{jS{UEUyBD\ B(*#DCw6`՛j 5" X[I``;E ~?;zÆ tOg&P]Le=؋T%qnogM|I`ڮyY,rL@KdzgRkBxG4wX}ԁիhep4};4fn0KhaVMy^-IP,]wâ2g\q%1?̒ DOs @5Vrɠ އ^yzQop?GR#<]^aZe 1&y`7jqLOm,Bh|pU8Mhj/}jܿQpՈo è&d)&@R& ㊌޵Fr#_e;H9H`yذgw K[j2k-U)YՒTY'Ȉ qE=VT駟Ovpxr/<:pj:K.@5?D\&c\gQ!LF+ fb*P`9j"!&jZፖĤT5hbΌzTl`5>졊Y)mwG?+6WW7W s{Tp+" dLBAxk3oh5-?jNՠfՅ5Rw{\UF.}8z4lyiO ҂f^@$ meHO 'ţ91@,ŀrR>O/K( =]}onØ^߾K>|Q~:\-n-?N?ˇwi@4o+ wGN,:şw_~n>":ي&xG)?%Aӎyw[]zSW\*Z 3N.9H\1S=' l"9Doͮ m/f9܌i\Ksna)  (vH8haI2G[T3omc^-p_↾X>ˬY!T1Ld` iamBғAsdt2UR-Dz`!#l>d5d|a5iFm#hrDYgh,grbԿ('8_SQںc&vZ;3%Wͼz$KRÎսZ d-jΡk׃59m\_k@ $ V/'?wxSRK9u!I[6DK랯jbz*3 Iqj0H)eFɛg}oC`GYHM/hCp dpg%CSIS^!`!'XD0/~~ǖT2':VJܐF:g0+#8jPw+LCźgN#u7uYs>l|9xr>LWK#~rΘ*&mM^v;gh ^!= Yt:nLx{ε^:ШDbUQ\e~NI/:,ʗcj2aQa_g'A a^.,f[rڊ;oHVϴw\t\#îUh7B/oLf &#v<2"S{0@Lgy٣^ M?jˁNN;!诞⨝cct2Ԏx?ش:eVO2TʀRpҭ G ڔB&dpևa].)Z4 f*ϏxbR8>я#dsv}sYi< txi74V?|CHq곑z@`I=J.gaK 嶡xerRqMYEp) 3\BKʎ> A=<9I5N%9&5u6K ez1?LHs$(cےYFe*IRPѺ ` YV\v4GJc 0{1GCKnNٕbt4uk3 ӢIF57.{D+:[{жJyDJY8 l䱚JWU=f5c]r4>œh+=8rAj*XX$̓зM4SrUЙn7Y!5! lxVS/.ah1r9eo|}%mbo.\ZaE+RHO]^pµY&6 G2{ey'!,?,DR@L~f¯ϖ.") 6eK?\ͬRˉK[e$AqMqJbҀ-{T$m-D/xHO⽟*2VR[FvܪySZkZi1Rm,1-$lc:nozU%N4%P)+Z;߾u:J1*fkT)@ڗ k)ܡFB'ϒ q6x63zq-nc >ѡ ϛTblUk.-g)(C?}[N$_nK8k<ʗ缬.ͣty2;gbbIlRFmMFZ!h6wIwOYݨD+S냽mt ؀^4ʑ}NVۮex&`,!˩2OR†g_VN[Ml't|xjO/ז=qSķ b$*_@拴!Emn/prjăw[1$z-T ^VoۂwjUg#ˆ:lAͧ9׶n[eGACrS@GU@T_qK}$8'KүuU$!{Jۉӽ-"FeAZPIn#7\#>w|yw,8m."h.YY Ih0攭%e B*)Z-ԆT*[P+m#-Hcj(G1gCDh $kT:)Y<@Z!,krnQ h%\id鎵.%JQFG-Ke4NX(wVĿWK-'``{wMzUa9g-[ي̭ )RUh=Px Rւ6Ekf{ N2:'Ȥϒ&W[6q0D g5 $Z:(VK7y R9EٰvqE"D~VԀ~2@d $lZؼ`"c&T,cK@TedD&2o I'LR^"9T]F%d :}HA9Sxׅ(, 1 dBu!RhR 'σ 78wMu>v٭bkOztpAfcNYWS㚛Z'I, Q}IbҝnFTZ'e[lkmVJ1x#ǽ 8jԭ023[yʘG-[@k!62R}Pb÷V ; [CS.^rE܇@SK1*ģ++a[}wja;Qw^(=#RΰZO7*iZ:lo zw蹽Ժuvc=4jy"wGآ?9Q T(ᶵ3g^{7@pGT0)wb^s~ -az&BlJA mՅN=YmZN9)!d 'p]9Ui%`'l}UQ!ܡ[(=Hİ{/^cB6IA@VfLAyAdA] =%}Y:{R>_\n~._ҹPռ\QzS'8V Qr <[Umj1 8z.Ч}ziWCG` Mu˭ٚ8)ϔ׆7xw==iFۅ/o{M¢X<ǚ툾z8W׆mtꨃn~?m/[TG0+_blclbu 5#Z/}׉>[XK+쒏A$n:_|^!aC1ioxQ'ѬLe IsS EB A"qЀpeTI̵ #Y .6< ԡTQ,lE(m# JL`RDo,/"3O'?@enÊ~I2+8GܓCJ[KDngEO % f2"4qdrW ;ސUV&9R2P.':A!Rd,UX.ʧ GZRSѸ9oazĻ/"îOJ̍X wE6>^t xF!4HY%еhBu[q ,?oֻC왫Ҫc^t7l'-ߗ x9ܞVh a~HW7g o./7xώmnFE/w,5}.{3LvdG-{%Nn+lӲ$Ȳ\d,DF^%cF՘~ȧi"S۹%Rw=S2DuO->0G=s//Yz73\Y/XQ8|:ia~0jkzZ%@BsigjQV?NC VZ..IijJ.~)B-6tˌDf$>T]5^ .f^\)G)$"j,vŬtS|䘻Js/^WUP.|tqVx}^í52xLӆ^xAV;B޻LLJڢ2F!7u"6zs6Z.0O!#O#˫WGQVYP 5D南TW<^5,,rvY6.~ BE$ĸ8z̼yf ̣I:6**JpсLP{67[k;䛈8L5/Wg vAcqgeDPU[,՚& 4NMpGE|v+_0 >5dDڰ!7,ڧr1Y ]2S [ lw%_0,mnJ}a:t|tpĥ`#^>Hx*ijMXubjl+w!(kukG*G8z4!ZC/\r$LPz=_$7o1ȂkdQl֌ DljIV[c팵w4kfpRR\[Y}zkCP(BLw!˫;gf2(쪽k·\p΅2?tjl kitlG(S{T(MLr*<4d<RoSNE4[XC&djdG4K)fxιdY4dI*GIsXNBդK['L! 
}OPdC< 7@*4 f2$߁(iѨ&I r $^x\U*3e8薬ɨL(bHnh<[j`(a + EZu8 iE$΋hy80u/pAl ҬeHq4KY;- `d?<|R`3ߣ *uuBLLMʱo ~~6_t8;oiR9-~\dzЏ)^D[1 zytu-eF:DKF S3`PA$gr5)\{TܙPDx xkp)6eLfRkWbPz &c/e\yC&p.?IFdBjõIO/(UhԸb9w^tP`8 Zw!JJi6C!g:T_$KsWX30fD c4_4;kή ͐ͺs,Vjj.l!cCmLK*1Js3cKQ,ڕ7X8ɟQT}>]u #TTGюL$WB9Ww~.* uߎ!im\&"@2?&n8˙7*0(e[]kuCr\g2sIz3Mj iˑ]MyM\|PC6{Vi2-Ekܛ{!P*X[ A7@k@ tjHw1yܹ%4<U猅CS4:aVA'P;NI; Q;?47]*|}1~u\9la,m\]=(&mclv mz&ݹ6fϝ>+;j3osgf]o4̐BjаM.a64|Z{Kf\R$kC"w7l>+pђFPm; R١彂p9@n-KRfY9g-c< mRke&E=l8Yu\L7.īai 1 Z[}aTWuŏѣ8RäcV Hʒͻ Jr nƝi:. } tD]u7 `DTgƴs>%U\@:Dzὤj$ .8vxc@E41) 4ORcE&ϳ̦]-1$fnU㻁@V݁Mĉbϖ1(Bf^;^o3N\`B/ߪ ՖЛ#og!_*OߓSongAQ#Z8%{\לۓ~5j)[F>(yWb ĪDSC{#:*p@߂o4Z7%4E2}spB"ISV\@}ꏆR7 3tnyHmA92r\2ZTp02..ښMQqs.ctGq7Hć0ē MN^jPCzh4FjS2.JC3v ;hYڋs];qExSh,J9%b LR?_i~yy9zM\eNgoNFFq{xn3QB2}~*?%>fEy޺~*Zy8zhޢY|1ΐXc_zIV{e+U|q-;w:hCVj7ΣZmZ6츶:kw2 Bhe PgXOpM$QPђ %O ! ˥%?;HuS4*ӫb1Ұ3w95VD֐FofGMfq.g@)U-Hx%ҚwtK-QFV!$ -crj3_}nW§ۅ-ƻ% 3ҳXw7Qǣϗ-xN[{$~ J3/lAf5 >;t?[t_9֫KVz$:UH7\ڹna/hVzA9jgXGdjD kB#=:6B3N!' O}qCFRk8L~K`^ 2vܪ5U$d䠡6aL6 0ĐBrc:֝;ƒBٵ'x O, KQD*>s- ;!f/ޕ6BŰ%}4O3axiL̼x U<$u}3Iţ,fI/22"2#b(`=۵[P+Ґ3W2:%2?m' [Y bT3Xe@<Ƭ[nUhș:1p7Q}Y[_;>!کu9&|ϟ~=R`QEWM}-a0*E*D>,"f}sQeBUP;/)Vv2va)]#$ NDH)9?Og߲|`JvN[*QK!jkw]zw1%ոep%eDx3p?ДԇuՁ=gYOH)7NB R ,Aj_i S4\[MQTuI" Wqx!ת3*)ӊicB# WXi $@rg,I!8d!{atOV:|_M躚G!vL4BN!} j> ri}S &@3Rփ(GQXugt-c+[YZ:)kp[ -qZ(8ƒp3a { 205I{_/31_5IO!XMИߥkh<>w! AuY>,FDmñد8Bfrǹz~dGusж g!d# DC0b{!Hk(s@AZTG([R }fۜ'H60lԍ rJ=Kkj/f8nQԃIpvJZn0r*g>/.գ]WpT˳y)%Dy;nX {JJCÔi!PK81KBBegW԰"b&m{5`W6ʓ%mц[ܣoS× X6qǗz±*|;IVظh:Ie~/!#dz$`4) LnVW{70kkE@:G! `)I08G&oexs8a]>r,| l!\%K†iERFs͞cK׌bbaЍ)ZmyS.v9ȦDM@#ԙ4(Mv FVTBaKh?-DN*dC4{*];$;ǜ8`HB--@Llʜ3S!֔緉qm96(*2e.EX9K|wrTo)YYaӧ9ՐXU5!p\׍8nCdrR""ls;-$#y-: J" 1!T!G)7THke=+% vr-SZpņ)/U*NAy:È ZfxH=es܄ڹJ4t\&VȡfPqPevP6/B5.x%0D}NW7|'s*eU" 6)~LRt3|3'P1yU"R:Gg8@$w"ZS #л d= ʷmP2UwؼQ *;T_V6BPK)eLBP`9XXTɔyT BsISi0@Sgr`=V3ުkR*lx1qtXO&_XoMVԦ4zǫ 5 9%dṿۤCf4yz:[l5F(  22c:$yOܖS { v'&&T1%b N[u|콊q0~s/C脻k}A:S"]ڋ0! 
2dhJ*mD91;55cj*& 3- š W/!7׫ i-eHe˶ (PK"P@2$8]/ g>)h}K~oL)!| 1s$$LлX'Bha(eāToeȉyx~;}t#WNllS;3GNҋZ7:j){u)?ߋZ;9aMv:rDQs`t;D{2lwKab)'Q_-'`[Fڶ׭:Viu΢in.:)HKZbV6X C↊{+b uܜ*?E|<#Eqgw~hu#1|g /GR/@t@N"3QyM*JGP\JP;8;9kNRCtGʴ}[L qrzEH`Ҷ ƹ&yLӗscvjx9Ŝ>^k2uS?{/fNY A|O߶XͿ&$ˬ~-$BVA!+$5Fhj3c[uxޙñyk8 1L3+̯ҁpG_lx eJ&DZbjl9.(<}|F3&WW8(MiH*5ZPRWc 0DN4bnnvK!HKO{se1 ?R܆1 V0fbPߘ nQ,12<&_Ʉ^Z Z/oRca]YA__p<{QEK'*<4hh_ z^O.q^ō⩻!^n SHIyoRzE~,uYG^ѵ@S%E !))Qv[xz6.]eayI2qփw4-=?-,3~V.HʕZiEXGKNA*w$2=}AO"W׈";2E}pQr ) U!27уWJ!zdz:tfHB~0,wրf?N&S5O 7[=3+v#//L6rip;f*5X}߆W5zLA 5bG@*OY}jbv Pe>(*qn,6 %ky [2ire cBݺT'n4vɼsf.Ꮶ~4mHsu3:5K&,K&,ñUeG|6D#Pr}gW]BS7M&2z}pj͗yh j4W/iޫp3 A5uhh7 5 qIڂ=а+Dlp:d).zɰ폗&Fn޺ ] ͞f2!L&2Ir=2^o)~%sp)!0GyFᗅgser²xCiʌ؍6%?n1PvjN"*2TT 'E)Xf j{|e]xAx\<̌}Og߲`JvN[bdf}795/dx`vwq)z׉bqo_O)SβJmpaĦ'Rh~:nyÞ^[g63igTIL j:\6M 4~b('S!Q](ju pӓ@+brW 4.=_#y;K1:2"K@Y*g+6b<Xb ]bsd ;8)w~e &La)(Wf +1JlR#ٔtK"Dlz7N"o=^NҹN7(vrY=rɬow>M!#Rl)Ǣv}YSJ8 q%Kq+On ̹뽟]xK2@=Sj$ DmG}03`瓴`KP}.,:[ql{{`V^|@HZTWDt9},..^l0uM @N4:^LV?11Ť^O5.80}nݪncً0ެ枆 >~PœYL�{j>{^Z~b/kmgȇb%_ Cv-Y,v7:vKy3|Q'-qNS-Q!93pLe6 }/j``}0d㦣QL; ŋt1&>O1;c"+U6}mgvjj%|@E#w?_>;$u4Cg~ߜf8Lg֗Ɇog`۫ި~?EⅫo׃p~]d}~zs2]Ruruǫ.k.,l](op (Lux>{jzX_v zvF삾߽}tFɇ'>xۋߏo!cuܙL{,m7Y{Gb0ye"مNngKx,PzU&Y7MnпLF x{Fxp>ڬ\ͼ|ʴ >I@JǽaAG]4'n: n'=LQ'u< B"~ |Hʹ?y8"Dgn.h}UwAQ5?2xΎ2m^!\Y}N=XBŗnV03Vθo{#=yg>L/ƿ I_x釸^'!__ËE+Ǡ-pV՛pz>e=[o"9c4ϡ\\i&_Άo o'5q?x'Pa2e$ӹLkZô#y0?]gM߯bM4{PP3&g岎 Dk-t ܦ%BӅw)b%Xb|B2Dm{!sV1ϗ}W0#-DƤJÜE0kϧAv~N3|3 ς dsXEo]L?npY VnPL&.3XG Z%X$Xƽ3,w;Cޱ%PXz Y&drKs]: ro9A%Ϟި̅zZo8/df|kQ k5R7KB|}D8F!ҜzM{T [#q}ָK/'*Sp[Ϝ_:_ũ vΝ_ĹpKrB[ Vn-K[˦[s$ws{ætϼιnQp-XaneY6o.&C#pYwb*NKN&A(&fUz0rU FO7H$OTVޝ,hc}α;26(s^CÎ%!c]5%JHQ߱٫0D3%R_nl+݀LJڳѷߓ(=/mHG:5|pN߾~N+| ɜ>:AO'J/^пې"7;﹁!a 5Jj&1"&%rV9 ߪ@ER3;ur2br|2iS˃K3rT`#&+~Pۃᤗ\L*1+̩C7٥'vpWDT}}ח%'7day4 A0:qK g7^.i(g TҚٚR Q;LcMSRL=Db\ʌNu{WU>#qW oD)ڥp!bfR\g̳ŞRܻυjmB]>E:_&ȗErlN9)_⾧H sK}ok򇃩6l2o<TYI4Z CUc.B Ts>̀P1 E@ofa%1Y7I%tQ>qNsxDD ,;9hs׬`o7} Tԃ㉋yqMfG+u^՞IKT3OK Ӛ*&2% =OL*P¥R/w250P&'yZgC,-)=uw%T0HY'Ԉ!t'K}6-C.FhʦTRiְQiμ:iWCV%ևAks rOBh6MKs{]d L 'qDCz'V$gU)4Ȍ\::ɹ@#'HBy˵%v*$B84bWwr#Ӑ DbJa(ҚS&\8")K ZdrI6PS Yq1pcBpBQnDmjDIJ-MװXPmUSKASXv&iH ,;=V{ 0q6*1'e;f#@T,QDs#f&% Wb+5``Ej>$D3hA_9h`hUݶ֭ ҉J6QB (e3*A[E& ?`(2HSB:ԥi;@!*x*%C4淭kUYϤ0RWWT&x`LpDvɡfD$a0Z$JhJ20[8On}#<\Y_R lIRuG xPHB9*,1}8Ox"-8݅L7$xg 7me^-UxW)w7@ܚMe6/ӰkJ/֐a~,l$&,TQ4 PToe;z2bZ9J9ՙ8zgfi7S`{=IL$"`|Qƒ#B!)話A*mtčWr"2ZI欨r`T(T)re 5n[yPtD2r+ƔzM43dbVFcc)rז.r5ކ(TR\zޒbq[@im g XUQ&rK}H*%2BPG@^d %S,As3Щ3d<Ȕ?ɶtxscE/@,* d$ vVVI`+<6$d.G/}YF+N.V.}{[9n%:m$Opa"g ICF#Ğ똄7ƕYa)2,=Aj2ƜUHhWJu:)2BL9E,ϴ$]=L튴{D eU X騄֢QyFerhn`UbM!ÚStﮓ+-?_wj2zHޭ>|7p}? HHH_ uB%06JA,:r?pɃuTĬfnyDsy"@JH"Rӏ0"!MQIU*}(YJ$$,/-MC@@ߜ3\*9 !H/X==gƐwIbaQpYXGZ/XYP+IG@JёHC}9LĴ0h "0i dM_yN bt,V'?y azD>iϾКD".BP5XkC!ڔ>> ->%xVbo7K L\F xhINS{E8V](oPqi=႓ifEe5E%@oi\4b^Tw²-ݒ XI4  n-Mx:k>?7RZ?'~t '_s.<.w=[+|\OES.~y}Oj4Jv[g,Q=[яz704UfT҆J^S+54S uj%pB1)1ȌJGc!Yr]T#c. p MQ0 ]ĮqԻrUILbhSP4_ݱݱ>lUz&[I=Ig/iAvy}ErF|^}z‰:qX/ބW݌SnKl{əjẒp}!*E9&ѕ tM]. 
9W w$ g*ߔQ1T'a֌Wޤ2c2lRvM6?J_vG2wK;zuܻ7)']+F~6ةMV|^VΡ{xට.!yӂ"eJr&Ƙ @˭QJ.J 59`Zzd'=Xޮh,h#zAnÒ neI ECh +lj?VcՈ/"-ucj9ۻU W>ca-Ah9i=scNΥ9XTF*] 62ǢL!+_wBikQ :mv 5$Gںπg6X۶hI=nPС//mH 25wM6$:5ji&cdLمĨV((dFSEk_]vx_jrirp3Y#B.3'0V'2.|$CHgk^C(BP.ᬋ1 AJd_`dE"cb"23TY7 E]~ԓb_&XSxSNmںsj{AnNXsIX₈^-RXUm2h1MU"[/q_@z"O[áq>Q0|#;yǤI+UEk+$*VlaIpY_F~zl'c2EY\]P=zܣafDdqfNgD/p`)1Fʉm  D8Q+9;2ʳcrxO( 4dt!{fFU~i% T"}~pH+>_ϘѪx)CБM_>ilmT+:PpdS7Gzx#[hJl1G6gv2:QPpd3PNsdc A&OT6Ig-m̬dAhǪ#⇸Yӡ`xnjbI1y#nj0 xqHsߞY ψno`x,/?C A ̱ U< ^2cˉT*m|LTT `m4 *}FP ZCR|L]5@q&HdpeH|8z ?b,3,$)),ȾT N+bg\;2Z4(?H`ߨ0Uڛ&t( g;X<(]25*.s)B\9+ y*8 3dæwÀk&[B#'|[gwtD1-🗿ڽ3Q3inC0_nܭ@IΞMʏ+w]XouVYԈÓǚQ]j/7e`ԧ>1w?c"VxxY0nEIz[3 ۊ탶bFvRAj`aFqIw R ~]b# TbE{&4r{,&y8w?J6$>C4k+WF/9'{M0NhyNv ;IŎ ([)nM{Z^? ˻k'߮6ǯߤ,~P6˜BF024$r^!p0\/ŔtΚ79g`x2Jη-i+2קTM;t,B5`SIO?|m3Ɖ.,V5A0f\Mr-:$5k4d!99JuBU("OΏ `,t7c"ܲIJ*Y$Z~t{ tssu7=ahU1^]NmcSz.}wS[9b׀МvB4r~'z:u=۹O}˞^EN%މJAqI A~x )c??7:oWkϝ#'Orl*$ [(,hF_~{uo;n1AsRK`:rw C/oHdbȽCM%zX0Lv.R<4gC)eWJ|J|05V;8"̾Tזk[z&g)ٛO *i!:Ө)Y1!lزOS-4̍KEx% C'RP%Jr[c)_D7+8.ݐiWlh$JFC\d);vY ek'(0s{_٧<㕳Q?ؔyaj0/X40vu 8 S2&4٤^frpeN52x^6F'w~nM\R$ vcG;k*q&[)V_u\ !U:7i;J]6冴jSN'Dz9Uㆈ:yARݐӤ)RGh-g`(r8y+N+"P9CcNykP ёA)kI '{u~KF@q 2!D)1g km4Q=A8'#.zRjYx}PmBb37ۚY*,gG\F5HU8Lo|zzqp>iiCy5dr=GH.G3\Yk>s0!77S ?}Oh|␮ȁ)w"<9e}08k2NXMƈ F~:v6=u4NE78ćV |XFá ȇ8l8lLIn!Dg/(ͧ9R6-f5]h(LR{>~lEaȭ CfHuVkȶw})}J=97#\I5E0Lhhli4Yz.D43*5@M]Yb$4l:jjρFeʩjP.mdO"#JՠBNmﭡ}s=)XѠC1M,Ny{ :iR ј ?~T?-3ܹhB bAjD£ O H62ja-(rHGFY3D Nh6$^ QQ7jF"Fߣz)7 y+t ZRX4tXYȈros)%]=;:k?i˙2\3BKlR 'sZ~\m5gDW5+.H[Y}Jso1B,M}m>Y5Xu[_V j55x[ |P,rO" Y4sse0G4qz\Bn~ی:kΣXHڮR<ЦA-w9I!vy91f4@y"\~4v+Ob$ǚsˍI{nNt;l]aHhzh6CZD$_t(NoKt6[Gぬƭs8U &3tF%ʂM DQNgQm 3z$e)]#Gi lhILe{J _(֊wyd_!++%W AT)K?MZ bP C= zf7oJ]͠+׹7n^F"ONWG)o *et3z y6"PVZc %[4_O|cwLC$E;~̏7/Ȳ;dK!uky3n2Y}voƷ[V|`yv|WC^[نʨŇh4(6{P[8JL9wβ?ǤrH2.{u+;!4:qFCeΕQsۻc';='IuO57w&j~=r VX2䃝pPAump:1[ juB^zqVq na!:$0`hxALJZYcZODEiqɿ-c[{_3J\iOY7uѯ-9d>x"oش(#9W>HP*ڑt(Hv Pm 0_rZ\'[jV{arB`T{΅?^ B1`u1 д © f6@mlI,"h6"T$tN9Sj"Pb @45+6kXq7[6GT|x̣ٞ: Uw{[jߜ I[S. =΅C{p"sZCJ3.nJp㆛J WTא%0Ԉ\!”rjDUT&T1^@C mH5Av$z.BvBI_^`AI)aU8 IY(N$ԇ :\:x!w AQQ/Т>Њh@yc\ym Nq5}Rš@4aΕ`?sjs NB=JJ!}"lb1$\J,)dewz;[˖P5v;\x/tKڭBj&$+ѽeJ*tk]oW~ݟNm-|THeOw:#rpPŕe Sp1#<6+bZWD29NWjP0 ~#;.'Wq2?@/X6mŊB\{Vԧ_GCo-Gv,V܏= sX14-{"N~%'(w@i߆6# Fp? 
zddwE6ZnC·Q;39l ́ N*3 (fɚ mWmp8 ya&2ٍ / YWg{Y;'qAx6-7g`i`zFQx#HqiG*vH$0[zyu3aWh ;II @;kzb|T [`xcjh1ɉ C= ҄gy蔏KC%shM$C,悴j~-꩔XҝN&t62&(S帠,rP ˽P$(0f R4EE-tSu%6FU윣}dܜ-dh#xFQmDŽL'DNi=`/^M߰{7& 9Hk#|>n?>eXV \#wpF  I#wwkWyڬ¹j[+D6Q{TˆP],.B ձfJ g]> .e1p ڢX-3@QFpCbX7u%6V"Oz!3=OSܧU<:LRcA NDH!:{YIOVRF˨npFVWt?8p DQoX7`ߨ˾\(>Q-d;6:[-S=IyQk];܈]6tⴶ9C.ڹO!OkÝ*$b9( 3&`6ɣ.3QL-֫w}~3/ zDWܽZPq{Ӻ+SQۑx*9T+%כ݌,|c Gg Yש˶ά˓\yYGϬkC>wGel2)s^ taw1F!F+#`a2k)" u[-j'"K婴S@ sQ ɞ"JOe (爩BEy#U(ı>IqJ'jvֱ4~N٧ңO>4a*FtCP0Ezf.0ҿ$: r`LuRÅiOS&i;-vh&pDzɗi@&`B?ƺ4 jM4ziAspE-x\!v^H&ZϸwU 4P.V762nwYQ\n(Î F;l bcGY-y-`[2N("g6X"xvqZ}lK5RzkĭQ4榴aHPŀ[Xڇp 6븋z'M0Y^Ȯv@g y>6(8| {5hQ~3!.(=lbPH`Dr^.N&67(k'QCQGFƺ~Y(q[G U `2ozJiGܢZP'=\_W=Hك|kA!d ldbMU7~{L%Xg!WW:ݧ<\^_է/+E,ݙշ ϓ*7[߆߆:TN oi¤Cp /ާ޴5ñ}8H}с&8DGQYˈKu`ս" BRPܺ E?lovZ)޻H]R;y#i,9O!Ό4N&Q^W[W8PbMt]Ib>x{f7"}RuZy7ԔM4핢@v&EH/L2f'-_6&\EM#VUT䅣oJ9nA tնz+O|qru k*5~o`/T %@2)W:=2jh#.^ڐ Bf(z6RS&k'5}-F!$z<3=ӧl,|@jMNsǜef񱡭x\G,.^3!?D&b̵c@]⌎9{ y$0)ٷSl$~ӿ^@3B*ww`imBPi3JËENV %QeS)ꋼ O|,b8!fEHvJNf6a ->C+Nu\Wn imOJ7 '5 Bk:1z:,E%9 Qt~@$gO3Qw>(MKDnichiA$Դk* r00bf"gVI:Ӫ $˥ ęp*QE&ZIo7yR( 6SZ'=Jpu}wZTYi53FπYlF&X)9hPa`"a XezD2P +ڊ ׆z]V]^f1O&)-u*_Rr) ƅ>G瀷6]{KD?_ĝ9 pIeMRk]!o\fU쫵p#dwW-#-=Qq!FIj H"Dj=Afb Gcr9T1"-MM%рDsj BP*V8ΜU* `-[XGrno?Ė*{;O+aU3yitJ_x/Pnwݷ_~դi1_mT~cb?}9#L=7??=?WEhzF\Q/Oݼ#"|u{5}\P^!R׫n>ަWUjz]⋈SiTaJ*krz"ӄ5vitaFw~5 0R9Nf-THl@"eLs  iriayTm bQhtO_Yr|uT2(Qe+~z1IC +g!->"2b%:6荏*$$$J1τ t$߽RGAWg][sF+,LΐEU~HVjg&8N6.UhRO2= R"Hb- Oҧϥ~M93ԤbO&4~ҘPN®H_T%_^l*a' s=$7ʻ ~Qzb &h9i -'p*=0dJS^V:B.\]!;f`aya3[ CLUV \JK)rהq>cܶUORm1EmYLJ` /ߵa46(8dhVL+Q1*3b;)"9yi-xܓXh48уC[c}lpMMdmhLhBcN\.%2JX) PSZEN=g%1oojc|Kg0֫[pjLXR3mv4]6YD'n.~r;?[]痫,Չd5qoS`Ӻ1q"n]Xĭ ͇Eq64I֥%~0D3t:OFvm 'xSإ6my-/'qYx*XiY5SgT ґHq-&0lI B<.a^q@n 6Կ̹ZrbrO--<".g4c5p3-pͫOQ. "ҚjR`'?Z|׼!3 H|};{oٳ7;d=7[\xГѻtdWm_'3!ܢ aSPhiLy+Hqr.C=nCz|dCVgk[ݶ/A: YG)':{V Zʨ;gO1i;ц9R֧Z |Tf\5qjP*hOw.vd<%4+BJNe)wx6 Ʊvf!`5@ gS6gXV9wHGhmb(VOڛ=a-pLK~dj$3[|ʢXNt[cWxx"y]C@ز}~X,]h?o]` U Nj,u]opcbjv{37BR&t^,*6]D谲#eӨ]Jgߺ ~Y#CXVoXJn[ي ~d9Vn5! y""S$wW8Գ&RCnNU[ J#Y⋞֭ y"$S ~vb4Qb- uq x.m^/Ӻ5!!\Dd*R1VT5r O$+EP~sL9ux+JkUV9U(u+JELq@YG 7S>' ,(i3Jhp, j{ץ&9;37y G/r @\k;aoݶ|4tͻji~5ĄN]Bs5w;dLu X@4]lOYYΥI~%}1)5/?8SۖsrV=3 o$}ϢUR\#֘=HͲTHsvf*Ip%nOI^@ʻT12%=II5MjG(bYڪ1)r\CKyaafK0!H9pW%?/K<#0RK.ud$/  pIڐ 듢EP!50//o4L%Z4BK/`uְ+u؊]B z}M.\oy9Mzq,y0/֖ h˾=^;_}U`|YK)Jgɯuɖ[lwhkB5=(ORNeS[²!*0e%4x"\\P>c?}  n[:qsh)|p_ݽu{tm^w}ձfsm$Fq KSl8YI %#U,I`J~vya}ݸ, p:h;p&/Lg{}0M- c;ۉvXer"\ʛ5n>by9݃#WbֽY~}YMW_9Gj7j fIc(%˫_"-&H U/z04WJ(Um B2fJ+} !Qpv@.o0疑fDm}Z؎`iQ2u6Bc(bc 4qJ.99|xmA]v @I a11Vp6)HekKbSx8TUl]m.kPv,.]B)JEkPwF )1I$eY]F[p"R+ViL38cOçT֐֌ʊ?P֚ ׹ރY6B"_RF ;z9_=6fgl %G`m%£!SF/v`]NhrB9ēr2A *@rAdG/Ma DKA4$M.ts^[+^4Qr@P Ońhs;R` #hIko6\'o}^{'b5eNn6YE 1 ¨5_@jۥw'2ƚ𖻎7k4o0Lrv|9n],coX1ns>wpǸ%~U8J1 \r@vaGfJL}&!%Rr+&k-Dr `qzgaW1FxƧ”ÏoFtZ,ǻM[70\_^٣ QZsEj} J _㉖X#xb&V3Bs{>:!&)7{ނR@!IV"]? gu&3(W(,N1hpM1r + 6JR h>RA,h)eEΝ;>;)OAQf&4(Qa,fY$ DIJb΄$AV7Hd2 ~o">jh >9DA._|/z2̚@HɮHR# V/0ACbT׵&Q=:yUгSư,¾ Lc^ãkZe'kԄQlveT( 8Q>:< Lb,et'`ƥ ۉAoKB ;<1CJst!UIrQq^1xڸ^*WJˁ P岿:A>Ow\[[G)Otέs vmЎߥzs6= D1.7靛}sPnrj,s wdzxBB5&Nm*P08y0"H|6":{>NZA 1ҲH JTa!ĥ T>7-TO¬jE#T *Qgk(0ǗstgjP\uJ)WaQT_̱Թ!UfHe-7]%YXGU38>՜`^S`jX<o޾mkK|f=?kXOBU"hw(z'o`޼ނ)ZwY@䟲BŊS+BsQR2 !0/SL̩s)`@3! 
,'T'Q@{ FPFt:v|᜛fHDX%v*@bA{ gF6dFsZ(Yi5KFt3i`EeB1=iRNgԧIEۓ!q Nrgƻ;*wҼ7.|{_}v3џףrUM)t=z71`?,]7"u6hp&[NK~qQOuZyCI%Y'aY$ǐz5 3r#SxS] $6ImlQ6iI zKY -.EV'hODbhGɅ ~d \yIr!JZK")AQ254Q d]KDP䄉U]1S)ȻA$Ʉ˘PlSO+#FTK-J0 MtDOh;}۬ Xp8/h:?^՝?4,{WU=H t=>zr/ףOld_}E zV_7gKOF/\20?-R7Zܹ݅~=dT1O/zG%Llk,}LD;RXΉ{ 8ڍ O';t9qϙ9Xӛ(4ruPj`:#iq K[y]Prbzh&ӎB$De]6@k1} ddwkH Q7L| fA9#lߓ9"E"JIj‹G͘/g qĨGr5TS\Z8ÒɌty4PRwCeTZziE Te2Ϛ% +T_A88r Aټ̋/lܒV9Q$(@-h.W.gF bUݞ=ۦFnks]-l]ޗG{y>}(A(iTI1U *V%ت&l/@vNP1bۻu<ܐ$S*T1EYهBrg3 )E3ޯA!=ۘr09BRFL7hu-h[۳ VUȐ,\7si T,o,dHK,i!сuڮ]3wX7t?s!q` ` ggv2Fօ)jBKs C C!,2'ҁ[ }}n?: c: :N 20YOܓ#"*ŚMM2-)5KP(2LSB\qkajş]?%\J xrJCZ<2/⚃nˑÏ) # 8Jfyf5ur:wwA#c[>Wcԅ-gL6E/߭wD3i$h^R ?Ua h)lW/ L1%|?HprU)LUK+ϥfΉZxnǰQYdkISpƭYzGsP\@3Lf<O@jݱ_tnN($4<!Jk2xLG +%)x$Jb>EMIC{(t -)c}lM 5X}׏/&M+zTǕX}'2Ɗ mO4A7Ҋ!Lp±ULwH IgTƟo:U»er!W%Ի95OY1&i64mӉ A͗d',?=;/1RbBXYr97$ ]X}Oz{ҫ]Ig疔Z=$,qe`uB!c؄){I1y#eoVsRp^Smf;O[ӫ$O>KУ(g55B`xe5dMTjdi9yA84E"Ӎc 4pj0^2N*fq4rÅ[D]+IDkM|a[8j< ` tmV}c&ߟ,F:(iua۠Z4.Fo~ZL޾a»nwZvESUvP/v>h>ϻ_VY/l}qdԎ,0EcoY+5/ݝ_bYVњғtDS Jm 0*>nN Vۻ$!G.eJ.ۺ7i]W’F-d>|E"=!=Dan Hyl&z&m| ,F6+V\jVL^; (C͉*/Dߩ=_8nyhԯGRMekAvbMTx2 ϓų~ mXK{7,!8'47HLg !p <07xoH+ $] 4c,ۿ5I&yL>ܖd72­t.[v[O[OK[ } ;iOdVم:rr{Dz1{vJˢ82ΫOϵ f"m/$.07˱c_' r<(<)&#t5?>)"Zkzʷ甍>k\y:p$ziGHΛC Lxd2+W 3>ַmnX'@+v@֒"pCbc΄*fW6\L &x^ L5RT3%==Ou\P~TAڟj> =E`TS#fڎTun}ޗw4i>jFE6wpmW~r ]A\#|/]bWmlo2@yq}1WEl. 7OvbB]PbLSDMLCBM*[k tYT)P|7 K0kt魰g⩚0)$X|4}CiAh m(93t.m7$/Jn_4W!!G.dJHTAɨM%#L/+嬬1Y[ް6*ruH'h>1Q-fnYQ^|( ~1?0 5 cKJP5t瘧kt"O}oYjKBY)-S ??OBW@ia'?j)'s6ՌM_$tTJxo.L%kvԡQ`A*((_| '- yVzRB+#z W2x}DtG8O s + [hM'dq >2? 2ц J5J/&'Uel2|$YJܬ`7Fjt NǽXIszA=7@/5%p9o,-Ej!uyuxi>ĕooBNvߟv.LR x]VׅXw<(D& xUAPeD@q&5%ApP FYsB}PkcHضs^8F$-My|} cA;"hq#*eS\,jY{)x w2gDMS HJFXyA ͔ŏPpƙi!!qA@K˅8fv(If;zC߉/"n߂UVyh dTV أeG1|&dm5tChO<#'qArNzK-tDnИn8Z!ihŁ _G;,"7kTlto Dnj|:Sc̈́qTL]w:FS"[0JD *:Dd #XA|Sh7/Գy\rD&rsUPk"I)'HI)vM8o,TqKS+GEx3 D/Y8n!ɄQ.YYo  Z$?lӅ.D96OO4}s0JSRQL8)yjqho%6*owP/~UN'mg6Y|Vc1 ;iO' 4PE6v!7w{arnFѸLbo1HHzc^X&CH&wJ\YKūX&AIPbvB$\͐q=d7fKEI:{aqԷSP| /ԇa(㞫XZJǿV5D|4yV4frO&؅s>CEM$!㤿hcY̸,\eH-Vb 2syLNekaFˇ)gF6dqorBI{孺x ߸,krT*6* /"Պ21)wLS'+ pC? 
*B}-tp $5:E ڐ県p 8pdNlA>$a6@f//Ht'=֎toʁ=&v ISڎ J(YP[> !6<(OίR~o]> }?ziqio)uwWEro,z ^{2U!_h CkԚ*JiiB"W- FAVI EkQPo age Ѕ { &-@7_qoHvZϿ2'YB߯C6_I .ϙ6*Ԍ` GZKMjNk@!(}Gxd(D"Mo>D??UPRKk*i=x5&0kxr\kd Sh"bU0<ZYNtRFj'FoSR`ldou<×32>YL 3\Sb)` C2:P#"V3*s!(8l: LTg8*.VEgvuqo?_\Hs.?3B+TsaWw^^-Ws+>X' J4§o.Dghi)JcMpܽ8# sJD`lF6^LN5(#Gt1}[bXܙO$IA22Ut) IU[Gԇg\ 9VohNOSVe4 p!q~zi L 0VVcpu%)?Q-dQkxW406.Qx˯J+F)FmNT;Baj^#PPRcnrRd&^t X5= zݣ^+">(Ęk_Ӧ>4#p.D}tA-x[Oo~cRJ WVKNYOhQcsמqV0Aq+LPYg ݎ(.{ /:.)AW@<5:;Ya Y Z0W҅SK(%^OA) j‘:mbzZF*Bb8’,7 <ƒuEfI)J@Yjj )3$nbBgS>P(H+DBpxB1%?֧ R-0g|~j5V2SƠY9|K,l'皉!k[g]("TK]_VEjF6ݫJSh8Fn]XwSR1`%=sgZI4&pg kS*R0lC- .NQڿ.ˡ[\lgq>fL~Epoau`Z.F1,[XMq ,d8/,yMY?o޼㕑:YA'`>w8#@18y(FżPfEO椝_=9 igh,GHŨ]:ݴͽ$41MhV8%ʀ\qa"˵ż_x8KbK Ak@!l8Agp.hp̈́.Ck 2psVu kpYǚ* P&RȱvxX Jc|a !#=\P[7`uO1] *Dq3KB=מ52^E)C.5V]S0=lM s!mAyo?_o-`ʏk ︋9pu+Gg.?,Ael},ڋ{Fi8LhǑШ5C [tRD<p\mFJۻۓaljjed0D­fWE}Z}_p gb"19jN8 8RׄKnۂC e\!3T1%RK]n\Z/Ȍ`}ؕ[sialɭ&ӞyGaʋd|NsXP!,`D%vw(vW{+Qba154H_晤y3m9λPmq(-HSgy[,`;[F)w> OViN ʬ>Q~ny9y9迒Io{){n]W_ rڲnVs/ 4r4*ؐ$cWè P+]3A83 R[tAi)'-.UgJI|p)!H*2 !8Fޯ3Lߊj0V#.~s~TSծZC<[wSCQںGPJWwRkRBPfa]EhqY/]灀|~{H,p?p__3YJ>n[v=t#-ᅾ 7/ݪ1VwIX`[QE2\ ~2JwC-]8Mi%mdN,s,92T8)&ĝ9ɼ$G%ŁLUIchv5cSH?ڛj8\N*Cq&{(TZWnB)*`DI`8 e?VLN$-T#3ѥ49,)oAږn'xn*yhKdwCp7 j; bRv&h)theg $zZ;66vƩ2u&aBPGxkoIK3)՘!PH>xf8TcN(zݖL +1״1e:`΄s୻)83O8`4.#mJp'Hԛ9Iklaѹ>/<%y@Ub[JݥTW6S|˲5^8Hr,Ys6d뽗B}RhN-&Q/4'a;Oݥ,|`wy+@5mDa{hgH)5 fg؂_Wnxt!Go^!:DxU?zQ٨8vp-F_* 5jx`tS:4mt8JQNC馯*Fa!/DSlJS~r# r)Mcx*?=O=|_ Oƌg >Gr тOGdB^ؔb&K`ikH0޲3Mǧ'n# kz ZCU?bZ޶m*Ǜ^yB[$C Ud&-\H.pu^QRP*>a?tPL8BU|AQ0QǰF>!/߉&L솔N5ו[1QFDCIsTe$,k;uq!̫$jEfNPf/̜) j!FLO d&DEe!YF" @h|FR:12}UyK|gr8S^9R4ИgׇR%ed6r&NiJ#H< QL3\25X%QFsP#2{X5HE'К⼋NKQYz:Vd4N3kd=Y#:݌e|X>pVMYZr?EЀo]A_~@q6йں[QByRМ)KQ ز2O̍'qRJPZ[aO٘ kV$Mqkjh1Mj(ObCfQiFܡf{~MC/s^^5"V\.:rN/e$.qgXTz2 _S.i@Kw}P`s_i0Э#F9_34rq:4Twy " [$5y؊P;Fẝ5LP3)%l9E!!ב3B8YQ3cwޜx G]bF\q1Vnrw91Tt:ctx7*4O1kC0zC%k'}Zm v]f)C$:U5A*Rr#w/(Vv: su/ jT2&LSO.p*c22(3mĔlɆSkÞ_e/]';pks7,kAr\E^BEHE{G+O+){(3Br9MN<ɩɹ\9O i0 24/)4[8XF˲ {XL.`ɜLlbkZVHO$z !i;eOTL^Z"r]ymh"n֜kꫢpߛ5^-ĎWj?Z^M\64ew}[|by:h|IC N/||4%/  ڗ9P= |Eyuo1kL[^2ɏZZT2lD]d{@?Р QUzS Q!bZ0<3BTJ(UiT9 XMޮ Y-eWᯐs&5\0A̺S AO:^Pz]-]>#͝E<g ֺmvEL!=)]br: Rt1Y6f:Ej3qSfyk"V#%Ńd=wpw>\^18]t]jy^OڀkP>sZt ncYNk6d;Fw}1ê$voۑۮ_u9|"e`ڱן0^9[axEJ /),>UqcTĪ1RJ5JуB=Ul甴hJDtPzyTֲ8vUۚKޑˍv/tGx@JXyqʻ=n<ͨ#Sr)ڨ烫kD%ev\'ZnAo(}W>6U,c( 5]C̐21"b9r̩d6aJV\*u8 +N<-80UpC+\ujJ#ձ+yvVbDŸf%D~^Jmϐڑ\qT9r3HS:#QpG%[e[p#LjڈЯJc/\Oqs3r9GIPnDV{AG4T8!v6挂"!6[| o%J@ܝ<3M>zdGT҄N:I2A[!-"XK C0 /`nsAF"Y#m3D"tH<3~r痢k, 1 Q-dRqv[x IUIlkՖX+Is+X2aQIyi+*pCK-9bX}U]j"&${_uV<|`I6 |g!ޖ h!˸n6xtݹMfLN?'cE=ߔHH+~R0p͋_A9_f := ]X|1k]2 *R* B*вN }UԍYn)6U%O~ݤѻbc:hݎ9JMnn]Xn۔Šiq8:ZܡmSX#)FDw ԑbsƕ t(ܘyWYvso!rXNu^x/hRmӚSe '1w*hZso-BL:9qyrxM ``%q(8uaa + Έ0%}8Ð c&LNr4M3.# blueA g/ߖg N4T]s+aXQ{.\b.㝋 ))dڐ!h.[EK8V ]?$^LgXTS"xyu<8Z#P;U)rKE\F(#j,Y'D¦6{&X(Ge)4@Gx8x~z^Z f&d<;²ץ]#KKK1;4RLzzn@FZ;u>ɿb^oU.RS`u&@nxƘ!˴WBy'#!nf!Rg'sgۘ2|&Q.$.nJ\=>̭40M&ʫWn1:hQAPcz>J[i<.m'g# ֽιC%qa^QЬ,D I F)eJMT(A%B>V8917 Z1KCro4:g^Oa+fS?&nr'曭z+ѥ N0489=auYh%T[}4ݼH|c`:jr >M1@veUtEtӅ< Ր&w^ś7s %ze7!5e7l_QP¤By8.(2SsLW7𖽀+[/> 1<sٜMzI4"p,S4fy.  
);{7YҠ!TSӅԅ8פHfoxK1ÑX>{D^n~-q\P )T~Ht3 Ոt,Tsk+QG˪\^aYqP^+&O1G'ǖWn*96^KT)7ݲn;:Om-MȴAr쐺'9 I KF=6]?2Z*t< <7a)'&R#Fҫ:hv'"]SD<Պ9p)`Okg^rr #%Fxi;$7`F$7(UM,P>?^ne* hmMc!f+-T-’{YXB^>njJ|w?ssE$zt|E$IU1OU)>[1-tPo3RW;d$uTSqFCALA!3I7գ&qM) lS {T!'WlҔ-Ln<{Sõz."l5ɽCaÞQm+nB }trl#VrI9;D=簕pw_/XsbT9B+3 S`#=eZy}5*#1/G{58x>J%] TtQbȗM۰#m :03aJ-a7O>'_& ugdr_TIxu4$ظ$?N՜}V#n~1Gs~ pTt vm|9iږVH+ :,O~,R v>B2`E;|-b"ȶZ҃5qQM4L";r.#,ce2Lg\eV`](3bq 3rS$g0r$U `= (D yM4!`ceK6w sXRNr1ye4s%Q#t*AqCJoҀ=UPd(8I0P) (nթT]7Lr- I$5;a ]/f~lՔֲvt40H[ҖAKɓlrX S$79Ok!qz]S2q$1#ʼn8J?fv`WBh֖Ny;޸ڒ!3 (~2@y#N QggLJQa'8ӂd2X*VehMJs9vϥ0ѕs+ jqU:T?{_ڰgRd.y'!M`:LMr7nM^6 ;69d rmߔv_>cӺ"=}*ǯK߻EEedב|&bS䊱'ލa"R11gnǜgh0Πݺ/D)Aa#ҳ9#17]cҌ5 cĖ+26#|5>y! grHN!S4q4fQqK¨ nD;h"[xhTE@3PȞ][PPƥƒTZ\-GwN:h4֎~8UƜa8[M} t<ڄf-YUaK"`y1 ѺP%LIiܑ%Kꔫ0k0!ߡ ǣ`wIH_TdγGJVB!Jpv&Sqo^AfbVrjb*yȭE8PiM4r LI7uKn-ņʐ T*1i.hKOX-U{G=EvdmZ/*$9:ɋ2@hBB'2M9=ƼN`4WƻG9{??Յn7ǧYu`KTeqas,3V /FYD[mF2G֤>(&5PFBbcrabDf 6ƁưvkeKͥљZY15[!̬0"O\'{WF/V,2@>wd IN|I|albV+v$N~X*O頴s UD*MuZ 9ǼUS[}^ZUbdWg+NɊBCy$ћ_[B J8WNB茒͜^ ^b%:_"ke)8F!e:tEZȏ!|ڧnؙ&"xˉezyt)e7\OԸϤ b*1< ֢٢/> 2pj\OpTXzpƔbUPVTXk]pܸib[47\M\pi3lo5JyoAj)(ϳ%rSto>fY,w{Ljb|'3!_!҂(9ة۟|W1CN튿1- }G $A; ZEpGsWO屢Oe]!;:J[3߫+O-s{vZO Cgo>`vҚ!q|!zp#V|8~ԻSb[/*JQ~ϱu>dK p쉿 %(=X,MהFj_ FJP+K\jomOg9hvb˺$k+f\ypHcܙ(r[2e޲={DtO4h!Wpf:qo KdQdȬsSѕç 9CIף<𞰢l(a?k (!(,c&A468I1(N\X}x]ˎwpb%*o% )5Dp,P깲RHjbj!(*k8'kAo-qPZ#lԢXKײͯ5M=b`$ ZZyaZ֎pEuXP! m<Dv"hOgAQ^S1zEAF0v!q.O(PCP H'-ڢE [B)7rv umtuy4lJ RÔ$bp12$$L!k+ '$.}Fm@nY:}Y[y63kB^ٔIʻEM)}F6N-P݊_4׻ua!/DlJ$n4"v+ tJƻ -KĘH ݊_4׻ua!/DlqbwSMzR rLm)k$J6!݊_4׻ua!/D) &;vx$wVB-adbhLu\Wbql[1JXf[Hhpn3X\<iҐv[W/!ukfn(çEۏS֩eu#PR?u܊҅4XSE/.RIJM~TqPb}&L O.Dv+|d !iޭ hĩ +' С.Ш=6R~Bn|#q~KQۻ`s_r Ԫz٦rգq(Rކ+ou]Ϳ%Kb`Oۢ,%nh9^hTx`1 ]:RD;BTdJc?qB7c~$g<^Jŵ찃dG1evY;IϮïi=a&~=ȫ8J*VB>'msߝOf ?]Uy\ʅDd =28ծ?>yV]@Q+i1,|FZA:VerUϴd@7̾_!NnI誾L5HztY` دKW*rl֩ toySgZ͕^W 6 [[nWs5\#l9MFTUpT_|" Zu˭IJtc8_<׊NѝOq&Ȍp}r³V-a}] 8F iH7x] k}5~@[+I;F="_yMU dniMJrN*ʹ}M&h1&*D+*Vڅ}jBZ;l*C"35w8N򢦴g`Y`u`(nW]RnfY@jY o%p & .T*e)XnsҠp:KU7ĉ0#uAC)VP8ϴT%RwӅ&T)K'O-Ya=W-"C<>ȹG#Ȼ<%K-{}A$ 1b#+zi~]]w_n'f]qn1JF\~SE,gb?Y$u v%.O rY;n6mQ=';iB[@(⸴A7!uO7fB9%DRk|Zrl|E:7d/CMYY?_d*~[Uqu\mWm;ԳϕZ!'68X|56e)5:5ZX*m=GzKߐⴑ2eOJf},|25G_Xpؘ)虉. 
TYY\,H/Svn F>7u#ρ~7SYR2K{1RlNYZ7[~ʺw]qWioUso\sҩMp zDc 5R_ mPjF a䌧5)AESVÏ@NX4:SFrvK؏|ǥXeL}{mncsw~n9}x1V$]넲r4gֿՀX8}ߺG٘TY[P|Qo.MUV󫕾O5w7ޤvyvqs}]}k^\~1˛k{~em/sp~vSϽ|xy vy{;P E-r8ha5x0ڙZ`@I>DcoGeԡS ZcZ̀+-Q;dO9Viu-n\ΉwWi1SbfRIaUH ٴIwWO*7xҥ@g|Ef~߮ZoHDbgZ\EVB,7sc拿 ~>}'wu80oDcSX:7bU.8)g 1SI{}Dz"ҫ^6/ dǴ\:o(甙F4hm) PEז]w-}PQݵͺ| s{2AD۵o/־bsݧ,>Ԋ\* 3T %mN3RhkFl[ 䭸11B x=+1J#>]ӭi bJI bg1tL{-a[3f1 *qTX9KA&-1={bq{( g2u׵:L"0" ]X0 'WvQ,hK{Iӿ-?MŇ gPRzx4*ZW9ކntxG-n/&}hQ\^R2eOob#.K_\Ժ_E߿`To׏'櫹^ cz} XkaCqT6-sJV4F(H\v%Tn:-!NF\@j#2: A@3n$1J}aBp+ yh 2%چEC;hҲVJV*$;Y5eYKQ%KFkLu$둴^fK1T6_V)ԼWh/ )'3b qL80Bx]:CcN쀘62kWĄ!H|i@KڰJNؔ2.b;+9Nͻ@8S g|{2a#bi|~81"&cLR1E1Tt'15G)TlfmKxwTTf6#x)a1:PZ9,-;J+.[0O'1~CaJgH #7j5N5Zc=B@[_ݥ`<ަZ/ن]1A 2i(3=_n漛g1j3wTܣ*-vU#By1b+ vopj~3FLc=>Lݿx|HsEGx#zMЭ).\`;JJnjo*Uv.h(P0* Ʌ6Bk-䊫qi.!u Z \haC; mlWp>1yQ(hUBL˷0%cNԵ if`9̝4TP#R'Xf68 nZ(+$~䟝\J_:ߨ]˱_$tk $_Jh#ꀾe}QfJgx߸އ7*4~cz5HpM@h$({;rto 7MQ M)35AqeeB%(+t<b"+I6n(Q̹v҂su$'K%{qA΀.sdN80D KQr@NyMhcc7{IGҴԼD/HJo%8 ]Yo#9+zghjnL4 .OZ{d9u)fReFr*"$P'53-jyV)B%*myn}Lw\,栚`aSrg\x7P<9*쇫Bg1q,-IV$eXsZu<+eƣw7Z~bz_/m')ujX[}=ٞK.v(JBڣ!+㽨h`(Zhnh㛛念ʚ"^Ƚ| n,7 $\c7$bDjfMUmݾq-ڣP8CE͂uEFBFÄBql t8?Z󹿯ݛlVS%Wq:Lp-R3̳kb4a]ރ.0WwgKOS }P,4{KzyP-V.zn]aOzHmfq0M%,$1rSEn/w{ۋw[/7ޥ2z`VQ:| " dP c#9:3M@EJ_ rF'+ PEd5+qk\#0k'gdI|-1-AP RP1d,Œ ԑ X"X nP!'BP勉8af"xK\r&j L[R#'gN3VXY4#Et28(2YkaSFNP/L<`n G-ɵWbڹXY'v&րB|©[N;AWz\uONqp LJ)N|Wq] Z I&q#'[f.-5#$=3t84W0=i)#5tPs>Ml[q_Ml˼Y}qEFZfK \z :n\7G:4ќu*2!͹!k^d]IY8l\!'KN[LNmiu-g<\m.<\5 <5IRH7-XZ_nǞ T&K ژ:G\V3ԣ͎ n5Ufi yϲwdo޶d.{< ndke8bGS;7O/^nᶎJ{<4N!)N0#D^`Q[MJF`5V΁Y֑Z+4r63',P'5ns,TDhEF0JAZ@@ `PHUZ WI [q U+ @UKRsj{: -A F}%iɂ5i)mUZH&PúԎMVX+y([mjb@+9eX %PcƙGAJ51"N5 lcDۨE0%KLb*.UDP "DZfJfW`7m/u1}x3;}-)\=|VڔZSߍulaH3N:CY_,xk}iRJoӮ,b+v[{]E\B`?~9x%kHы#w磿ܬ]|fy4{_#`)%z`h*'zWD/CBNot0~fU=`ySO[?uStO3|B/=_w (zWغd}xrMݵ*Cm[Y]\2G5{ֲ/>ޟj%Avdce]t3;ׯf[Qr}3"1(e~BUmxwuCJJ6,.Nkw0e囱~;S; Z@1QBR: 5 *ժD1$,Gz4@uvf )x $nb@l'nf^ %+8;MJǘQ&}z GWDOKٶ .| #Yi\ܰ<g8.CL$䍋hLi45nNMOka]kԁLֆq-)V;+vSjR rDt&pgS޴[y{vkCB޸SOh7Udڭ)v; |\Lֆq_A{*Հ,ƸڒFiȒR}iZ#/juZF +~x^'K5>:-S8Gh3uU EG~Ϡ_druys=3 m`WP*b`~䯳c2\]//}粁IC1鹂ZOAJZ}{.0a:!^<>Z630skƈZ&afRݴ)h'Qn!@q|gIu!EPL`T~xK_OiP<֦ꅺwuY /R+1)na补Lr_=r_6DZ2N_BU@<ZƬBm[κJU)wAMY,0Pa,5,櫼}–1Hhz}"jxdHОޡOS7jf qzT{fA| \b*z|2/KUWk-Vźj~2Pˮ$d^Md]Օx$+Dgt& ֵv@1 Q=)AU-/TO adFU yL0޺=m)!敵 +y{:قTjo/fM!B"ubFwN?8JuF:ܙ=w+n>tU|'1%06fJӘbm.Z'e LCu$*oeK##*(Q4l9=t 3ȑ@O=@7:6F nx&DEDsJEKIZZSMX/OjB.$#kuhvnr^P2_/+iDo'_u,1h[$' + r@E2Ćӈb 'ϖ]yewVg {d=׼+ճ{gf2ڪ]Q=Uӟ٣0j0jXWa>ra]aIЂ=EÒ0Ѽb޽z]C_ ҕ=@}

Z,c 0:P'#xmKA 2J #8F爑r.(1%"{vUQi=l1wc+R$0k~KûYFb.$bŸ(E3b=f$+a(_"^OLp+qm8> ;;qPMI?ƌɓ+|.(]รD#:EJ=+SjE{2Z<Ċ#(אaaHJUVD\y#R1KjWQ9o׆*Hkkmg7Y/߱b,°zwa[4ثG\6\}mz* ga<Ŝ {iݒN_pqlkY:Mekw@EY:a29}e3.8)<"S]]z['jt q.`|K6' W} %Cu (p3ߜ/eO`ߒ g f7XR]/&PqiזV2RI8'`'0,(bǕAȂs6*gtЈ[9Q9I56< j5ΫFh; -D#fgDvY)4zDaNVh1 TTfa䫛J!\V %tg 3g3:4!c!vGF$/huu *7, KEk׋+Īy}^x=?&Oas JrzBYLzuyR 9"5\'i, jؿ->|zpwAof;4Xs&ZQw%oW0>-T8ZWG7-'( 1Jq*ԚS9/pW=7l,3'/)ȹ.̹ 0}=>Ũ%+Z{9'9XFK د]m4 a4TA 4hM)6{BN5QZIe"1$2d0X qDXH.)@K!A&TyY\FP7U|x[xa.?=`$ߟoq5!aR,#{OO?][s7+,SK RO lfeJ!)'ޭR2" ^lJ_7 Ct9ujcvq*A9zy҄:=R#Z+R܇c #\BJP;>$xBPZ )izrhWkk(~DKΗ.# -!;!t1aHN?wWBjV9~Ll"?)э(111b'AĂ)w?^Χ㛫KsB[J%^7)͐R{KG(\=27nŪoh!FC2|n}̹en=LV8^wG90^YZ- kg7T!}r{ >yG' e%7Jm;J f0TsJ!ԃ :;KO2m9PWac6,ia@Qj bh$y86h4!HG:bG: X6jABtT }/3q='ǖӽYc,ϣ*((3,VX}^8BjQRKB&)|㸸2ŵk~_S kwaM.)@s3K! ^s>z& K0— -ޡBdCKhuwg[f|#%Gᖪc?. d :3u*f  h2)DWe^= 鱮 plU2ШQ{[lNص'vXS@VLkJUu'P)iO<٨}}B@Jp.-gqrvbo9;$؝mSt6yCS8Hz9-;Ҭvj;80J qd慄;E g[6Djd) {i^2OpjC[plRtKv64N)ǜfu4)2C6m٠MF)E;8w &pɈ*ݪr&kvv_`ra Bݺd CЧ T놜Kr$ZAw(DT3;ihHy36C~">jkJbI(3+(mVP勒Ln md iDqOpy)y=@@ HSA>0œ73o-޲n"@_[(&UuT/`{%\:%=jC%e`m`/ *u[XÝ;H ل$T[so \v긔dz1}pzsluܖ2b` #_]{1\72Yo <7nqw|qĹ 17w;觋ﵸhV9 +%%j4Kxj4w7Zn) IX+ai1}~of $6CF{uZ3du$z5յZ)3NT~'9K8>.M|8 GO`LJDz!fiES'.L22) z=\ ,1v"NUymH+U;K}u6&k@kvMd"k:zNC;֫{W~1ܰnQCsM3x.=_?4$N:&^kׅDtʔLc&( j($J" g!yN~NJIՃvr|fs&,䞫!v8a?Fkey]ǻ 8:4b\ُIc(g|ç+ƕҖaB!)"BQ9Ɲ/*Rd0y+ PZLofw @x%́aC@;)L(VKv.[̉Duvr|Ϙ{cW;{|* ٽAb[@]1l>^Dxˇ39Ti<P P{Cd[Ezf V7z}l m rƇBόb5']Sb 0O90q780M?E}yq(hhzDzlb(=wtӽkqm4Y&bݟʲR[hH#ՠ42kΓkmG'(ETgg6>䱵q3miH}/A/SDt:"u!M{Yn67uu _Dv0PQ W(B’{C J!O;Vق' MY ͐"+9sn9YM6Y f|EH0JP$49uxтz8(CQdR$Zc[jH. F'GZt =sY<& 0;FA[Wb*K! '.J)Aqb)"8`HL]9I tx;[I@RGG \C@#XaXaIZdVm3#!=IƩ R; %XnO ?̨RA4,y i.h+X祒2b"`a-ɏJ "Zpcm%ur㥆@HOvƟĄ6 :1tO!hX>P%YrC (P2-됝9s.ayAf΍M sA(fL\ s?EVOMup,k%F|.-`SFϟOQ)j522= 9&Aj%k[ C}B[ K[}D 2-@ZH2ط_mk‰84HR?Lz[:(WOEy7f-K7z1)T~7wpj }<)٪56߽\Bޛm<]7nQ>],4JEԑau2j`-J"7c*A_V' Wֳ2-k\|d!_j;mVu=m8NSVЧ;}OmNP t B[jˮ )OpnyN{>EHACS甘86 HE鵝#hMɡ0x7jINR116xuݮN{\ cn9,+7$/ώwD!xT bL'6vRLzn7"[ M4:_ {?/VMo zؗXq&"b}eW2ږIۻw̼_a=R?Kjo6*Ԓ|JdNp-,M >>&k^rނ'1죛-{} TVd%Qj|dƹ|z 2̖IuQC%C_(聍I*;9:'{GDIZgq'U+̻ªTQ[ߣS(91;6b褐S->ůc?MTѣ{TU1MӦtRCO-P1 S?S1CE?qCr3\lVA0u BĎ|:Dƞlgs^td:/8i,ɑ2{lʼni4; ,(Z%>a Q1~_;>]B<6]Hӳ}'t9=:Gy@3mͺ9~ۄ3v0 5h'onE@#BhEr;؆X8(֙|^y ldM==UkE"= {/׆(<f2{Յ?@o | dƏqͲpSB! N3E`l|hBXB DQ2^:[-r#dK[dS[b~ n^b<u޼?-n Esק¢vYOY۬zh+5}.迸xL -K6=`"> '1d,c,Ǥ1O~Zx<0[(7l%*s) oȍP-dHKh$P,^f$Q;%߭ED΄dwnn?TGNAKY~xqq/*vn; |$r-2~S4vf\ƌ\Ѓq1W1Gn>u{$[Y&ym|iu|(EueNZibu YmT,/\  ktQZN"DPs^41Xc)|;/F آ\ 6-~_<ƈqC:1q ?{ǍAno12Yŧq>d[\M[d+6߯83Z{1=;vWŪ"Yskg3f@[A V)=t>Tξ۟^u ZuzxWdJEz(,V {/)b[]KC,4( c4v#0#s dk 8Jy1$2G|;5 )mY/˒Ù@cf^e>,@{LcZ ]N1BtBFCp Im(`B`5 A$iNjȎi.JsrYAU='3{ %T19^ i$w5[.|or!IϖL!ߟw62ZL1~ Rn9pDW=g8#Tf.0+~.s7ASZЌ?kDB"S Ƹ; +9nPZ%(Rl"\G_v*q_Ƥ8zW Ǵh5Q8:2K⊫َw_3F3и`VK=3Z׀m)#C󝿺 9Q !y;2I35-ՍkmX\ jM*Y/WuQOʪwW.޺/a-c>W:d~ NF_! }/p@(xr7T(} *m>8{MbvX}=8{@gj[eۤzN;dNmyttmal"p SbNn<8:6(yJ;cfA &{اCa9!SP0"^*tovo1yΤ=OJ^iyf:*|=L^xR1) E2ELR]T, `ef&̂2$뽒"{-w^: I]|X cL8Z]B%8:>2ϻլ*scɄ1YZk"YQ`9rH!*&kcɑĒ$ x:w`],.LoQZYQ2P~i"ڸLn\DHCQm,9oO@߽:Ϝ}N=dH+s~^)Pv-0f:(r׉c03 E[e+zo7-5jcz,-7g5/? 
,ږ-ߖ/ik7w?3 ԌYɕggYwWW$Lfş3^/4?snZi .xV'^) >\(QzZ)ٯn>P?= Hd'yѨK=ƑƑ$f wceX kE)>!nW:I˾.%*rHS=6;Z?h}Eԅ}*) Ŀfg=Cp|C]ܤf kp"jPzwC+nnꁹ*ӯW.zHD$Ez9p߶xͿ=k+yeK%vqiB/Cg& < 3d&Pz${>ol5NjQ`K*w"Lw5~ZfnEp*̷qEY4թmوక*M{it-*4G#DC y O6fr^m ז!=#f:/%hL_h{C<~Nf~ }1e q2Ⱦ> iD S$G(Hk@P2r-55j"=U2Q狊 x44x*"3< d=f Yq\ &⧇.0q% 1B`E>Xn%j&fS$A/; fzᎀ,Dлh'Ej_T=hm,qˠoz@:v!LjhAhsX]|վlvdQO5VF )@[ *@A9dB1KQ#G )NNa2!UAKœLZq1G: , w.j9Y(72W/,gۓB A  FK 2H89Db:jb" '5gʞ ƸD O`J|%$tl I'FI'H^%+Sg<ҒUSPFO\zۜ3\4y_Gkp5cSoSTkIlWzAR5_j˖$W\:a61^ ZMulKzC7)MM1hwgi1ZF>j;n+ ZeNE kZP+PkَSɶԮ"7mC>9ؗ\ʜ(=@KyQMx)6N_?o_zm3.k|Oiw bY,Kz\+3wtFHB|R%\ήi5 xgcv&*O{d#mІ#hD%)xrq17y e.XݏHm oG1-Z ]@4f42vE\e5ƽ@\#4)5r^?ǐ9[!=1anFX΄Ek*wcz@T΢܏Oqy)jU%i>A|uӣsq;= jQcPYӹn(ƹ]v h{o]3~ RQ\LMC[=7:_'`|MR=TGl'XЅ$b|XÝF^] -UMt?sH_.o 7 4wX?SROnly{T' M9=څÚB{#sYI/"w[Zxh|?je8j\e"G5ʭ/I h1X9KFi|p2܁j\ɯש8%gyfvYruP3ܼ977K!__T 4yq"9RĆ/Kg o.>(7Bt}ǒ2exn?"v>}J< }Z({_n&uYllnJVɂOoqcEoa4; Ky>\z$!߸v)mvCވ(=vAt}GvA*eƔcovQ,yI0 .2$=yg0e]۬v#SO|>^)n?[S{I;Pܖy?UqHǜ=U.`!97ȂYd1Lʡ<%PV { 5%P5lZ~Z.zWE42UFٻm$Wr۵/̇d,rd66ACI_b'[)۴-٤HJr`_dTXE 뭅Y~QSM| v$%#okai3i>W)yQ-.{PROkh;3-%d:a~R7p~5OD+%o8Ep@L)6#K(y7PtSv(!> #~J[Յ ,!!V^ ]δ/.!.?Ù#Y:upغ﨡LvǶƈ4強jEEqw3 9#4ƐW~1'tf+xjZGmG-Gɰ5E)9#VHMM(lO[i5k޵}B:W{ܴd}uZl#VRAKQ_~߻^|LIuuBmezI}= A:N)6I09D`ks n|ձݡ\ٿk?g'!@+nŇV!y^dN }d ̩@]0;2· 0yL#B &,d6)&hcG[A@P?|oAPŬlH U}A%QXVJ@92e^e cPa*$"%J' D< *;2 R0Xnu*9^g D=vYXfxԲSX`Rl`]ep Qƭ [Zuv:T. Jr$,H/Q/oP~4A!/ @pL./@1:/dL;xˁ",*|׊4bq!CNdq/ljznZQHuĽ{>^O^u0H;ݥZK^ylZg)ѯg X?׶`j rM뀉)m+bfT/fM^wy\}mD[T10d{] ڑA3֒zknX1?*n*);ե*#^]>a0jEӿ_kv 7Z/gmJf=VzHr&bS/x7-[(. 6ѩUڻw pn1,M4Ħ99nYzP \L'>m0J07~nѻŰ 7"X>>nZzP \L'>m5"!ڻrn1,M4Ħ0fvR{bk4F#bvR.|χ 0dvRK̈́~/HVQչR ͻv5pHѢoԾ$wnxcڧzڝqX>CG''pINޫk)pPFfȕEg(3%Hj-sL^B@E9}'CG iJ1:+}%SڧWLVD #\ 1AYڋ<+`eQ"sI:ԧZ_6E AKGyIӤ^ry97=QRv 4{(~]-6/0-)kSZG1'y͝бeO'mTK&^/>|r %1^d~;ˬw7 6pjLqb\DZM1?)~zr43A,̳ J-t (7 =?<{kћ9~\ @oɷ7xS֐?Cp`lu^7Fj[A3(VEum)Κ=duW?E&*'1w .p}D.T;?] :/pUYJAGח1յ>o߼*WUE3%(t*R Hp4G"ziN27X;Eewo|EY^Op h%ks4gĊ2\Te Vy%d:Hش&3`U0YGh9d%Eф2CY8i=Zv=BߚIwAכ֏\>WREY@eɪx@0Cπ8.z(zV0]aX41KdIocpr̬N!$fve>I|!2^0vf'NqN#[gtTeItԎ D3'A*n6тOS_2o\,anǬ]0 aW+xy>N>H&\̳;[NH7hc Zk+?^izCtqIǘz_a8EBQzϴ/w\zcבj{ȘA'}lA^ Ϟܐ޹%twcr[bXdo)iM>ji{Dտy<ǵ\η+Vj@|֪ʝzWӏ Af5KEV5^ܻ8%XjE6G_?MS8J>|Er4ՆQVU{iǴeqlI.G6V5_ݻ`r,q6ukzמcK8rd:[uf_ӻ3(+o%[PA1̖ZRx(ͯ@p DT)[K^ a(|i= Jj4|F/G Pg"hkDnUeí!n_aYڛuּjp(G=ym<LJ]8('oԗˮZ/wj]D>Bl>ٔL~mk_7i~?ܴy^f{*5UETUEvYͱf+Tܮaеnqm-@VMS}ry^eoP$b(%,t{ ]X{EW`jdm`JUJ#XeiZF9ӓ\u^eL!M.<=M:mԣ+vXm(4s8t:6cϜ3i)^3^҄}a=N6=cv_t[%蜏 {klc^K7~,݌`Fރ.ǏD:icR9Jx{lqe^EdoQCpr X5tѦ't;Eq%"cĖSKP4b)d~J B]0◱q8/.ڒ$B%4é%AOS͜6' El{a- N\ޞx*?~ ?N@Ӻ7[M({@Q! 
F&3 R'[gO FZEIЈ >JBO LB6쿮>ƞ#k (ۣv?uLw,F>O7ߜjzj-~mu!՛y7I؎1a =^w7FHBzx yl'P3OZyA4bK@a'TCK#q ldHg.tcryz=F>iAG=ӸK!NeS}k!䂉-w$$ $ЙN">1) }ةܓ t5I9 (lBPŒIue{.dZN8$13%.ɸ`_fRH GԻ2) h Z.!ke$vľ~`*YoC^L&7AԁEŘbh>D6A5RIvu +qYݕ) ۱3X2[fD~d Js)"P(YII%2aYBR 2QIJJPCYrK$i"9LCfCvA6`!TPT\XdƁJPRfRe*`pafcGq%-e4 ;Dl}8OZE=ỳ?b_8p1O>2 + 5[ ezƵx}$ֱHJٷ'^U(HݽTWB7nRJ Ljz LG#d~Kу& ɌJ8}xzȦg*FtTz9A=b Ù+yvtW֡{?k~>pʼnz Y9iyh鹨7PڎjB#@꧒:P |zN8ޚqciQ1FuX=/2A.]62hL Lgd:k`<&s>7fn{h+<ډխ0=\82"c?)nt=htօB+Hz;^&q<޳F{$pPBEϖ?OSsJ,X5'/p?y![>CQ0\n1KVPATOӻ>@R9 ];0=2{TƄД޵5K*'4[d]˞R bsMZc8 ilYt;@fZiUa \a5Vo09î2CSqv3ց3g_nݦ/WI my_AO=6=IbIэTt@1eDgagN$vT b>گ ~De.nzTpXǍZ΁@sp%j~ uwrq+; x@h8m-mFNR:ZeDqvxř.8{[kIk،h=l_?{k;}+_NdI*elZ!ԚJS W*kٳYOJ/ zޖ^S&hσs4JPl8bD !D CtF ut_O:d0עn?+*ˑ7cn0޽//.a1꣙^W1.٫'<1W~FJHxeC$co 7j'3?q@BO(G/g)C>[?1gS7l&Mijv:dD0uXK-&G0QT" >>am@Na-u'4}ЋAq?0S?Ӽx`Wi8?{ G 5MS"+=7g\)Av2X˜ta]@mT7^noh.I00 ZuSCK9NXN$'.%%BӳTҀA@(D!F1>0+εCک\x}p% "ZI>0GcOo< A=g}Zfr7:Az&4͡ /q.3"iU$' [pr5j8-e1G&B'/%k /r1O94sJ]BYaT`R'0<8#K聭?_ /b{Jxh v?hpՑYWϽNlm[ԞжQsju!*Tc={*ԈlMeKcf>5W.*Z\=pOpwTVlk f-^ȋ47lnn/`AY _zyx "܏F`__8+ zQ-ԟ,~<룀d"֐sӃDR2"8/J>(:HN (Ւ(z6Q5!okUJA[@)q['Zrz?7J4D2^ӉIXQH'9&H'j7 >-|K0:Py}L|V`Dw z^Aj=IT[wYoJ-XoJd|K;)Zkz3#.IW&~C Z/Y~cHuۓvj-(IWcT<.):Na-{.ZAb**b:Z?MnaXgT#UGx.L,ҁ3C0Y"VB.r8RG R4 cse ^ʵHt4BS FJMs/(2)YN,Dc9Üc&u`x@ j$x)1E'`՘;7K54i89^oWT^܁]0+'fkĻN0~߶;2iTBPk@}`]FC$<אkn%amc sޱw,]ul=#jW1=ލQ%L/fjiy pUwޖLm^'qx]U9q9xSt{f*w~~1{rF1v"-fe\V WEaX{p8_|ύGٌr${FGIHtx*/JンxHR>kY&YΎ 5ûC.{kŕk!W8k>:zv:Cv1:&ZIgX{N #0k2Ϙ p$QqbKzL:)/u6&q,zvXK(ǩ}TCHL%%[\qԜkN:"4 (0X+c)2!0Mv\ fߵbK V!уU[ETHoxAz}}T!44F( Z'5'fUڼ` X=- l\r5XV3Me(~IW*.rl{yԃ@pV:mc.tmwԣY@ly@jAgU^iw&k7CwᄍQd.T#jWcΰWϻC| Ibد9va&fU2Fm~M6.^ĮҋؕSK1Z6(Gwhsc{Hña$R.)8g\֊<cm" #L{Fi{kN&!%TVI~BLڱZ)-$[u tlv c't{CpXGOL&'4\:I=CRXW;8䍏AHJ*j5LHzdzUj+ghɻ"nhF,_Q,co_ AʩhfkE6 Y>qS4xӮ4.T9jf` j2 8QP۵,w^ݏE\.Sј$1q[%Wvid30ʓ#FϷ~ e]Ⱦ h70lx}KHleaܥHn #̏C$n4@niST2Nvw7f[9DO'#-/쌳rr"_y,$ iimȽ`g󐥱ԕA61; +v8WÚԻ0:Dg|s v@b+oq5Yvȷ9yw0+εCک\űZRy xj^ƵLXsj|8=[DI8?NjP)[׈_4E42pI0^S0œ2>8d`H(@f\P{#^,]T%њ[FED/B2H#K)4PAckAIob |~hA-c7 9 eM.hU`0 ,xCYb xu4XW266QzHZ,8 rJ1[Kb{b`FB[̩Uօ8EI^Zq%ƞ ƭLz0;Pz֏FxKBkk&$âKŚ^CQ#"yn ( Ğ!B* KoY3`yA:{ JXuٕe[( Ѡ^,+қ`f,04M^VHʤՌ$u䆛jLq8f0ȅYHV0)%) )EÃ1 +ך-._ uyKk$=nXe~ i+jhc , pc\ʲXd㆛*ځj J7bG=9H,'AWE[[L sܨT[;PO# gW;*SΛT 9&by`V ,Dvїl`[Xꕠ[tGc2LvJ4}TՃ|~꒑5 K鍊RF!:iX dEߍbjeIn{l%ە2 ^(`xKaAQt8:w0FpzClE88YA/o*}8:ҪzuJ̰bMnX-v"v04NY^i Q[5ERnFU7'UG}\Hžs>SM;;յ8a &߁]"A޵c"wS o4ݎgؙ4f%.-cy,%`%*.&릒#trչ~#^BS nyYj? ,OdL\fkmJi@[,IYj9)?T) Ltru^i- ,eR+83-g8הFvF{_TJ!]Xf@}d+*ՠ8D@k_.^1*S4Qѷ"(jM]{ WsuQ)P9<#NZwVcCp LW240bJ$^ +e8(Ƙrt{oh pWȞ&uQyoAED>LU@p <;5lX z~2>K%X hgnZ=N15gwc\ EƤ#>;1kWs cNvP'CIAR)I{1‘p?*rpR=xb$~ Ts[}Xq,KI~GI~&~3y3 sF,Q T<QVRK0J:Kb{iOn/?J11>òdykqVx>)P2S{5J6pNjoa]F:Xs ƊPJٛO4Qob.`iuiy!l]Zͧ?sP]-_Ժ0vWY!wd5wi7dt՞Q2Aj>. :û% ^:\=&:W*6ň@Cmg5¯s2s+Fʰr/'ֈS(ePB&R TYJ2DSugi\|.THROE+P_mcNxHh JaEsqL IrOV"iq> RTt(7 ݑw`pq^Pb}(AqVcԸϵR:@s_)YQTJvt\eĿ\wzz5JiUߘjŴQ %7Lh{n.uy{Kn]W\Pvs**[Xjun~SFwR몄w.n~1<>Ĩn]nʆ,r|X1&Zh]'Ʀʿ;c?rb9ө7_./&Mv{>Bw+璅rUaՑ|&Ʀ?cʎwqBV!1tGOSyhޭ MM B`&\=V!1![{@օ|&mS#SE\&L]R[(PBk#X @cS"Lj]`հ' kd{k#rEpuPxj#R W1q^WAq討3F> 9u>|8u;_MoP\J+,R zݚV̩k H|\,(m&uұz`>yW_|i dYU SZ%6J᜜R)MأJuRJ)mf@4gaJT '~B7vҬAFͨ$1zj";=krjt( q ѭ"lDbhq\:n|T%i"h&,&EVq_q45b%սCDuta1a:{Ӱp ~Ԋsr lPiO J%-xթ\\n_:5^(Tf~K_(ɔP'F~(訂+rB^BVf粫E)Q)3xً0M8GQB鸮#oR:ׄ;† iUTɳ  J!#4Fq9!92isS@6TIBF"g(i*deTjȍS 56S,ZRu+ÃB9ڮO-v$vO+*Ioz*| &uN),x_+:1hOPRatRTÚKcT^܂ܢ. [@n80J~fݛgm-:QP c${骰cQܠ7sڭôo/98‰ c3R[6Z!m\@ra5I]RMem9O{iΘ} Klm[HH.uT@d:tDLN| nzdM}ʇ7i\hۇqZjtq=ٟpjsk',سd7doGEc;݇7'0GNƭDU=m[?4n%R @[a]$_4kᴑ(e1X-L+a]Еf@ Yma|! 
Jan 28 18:30:16 crc kubenswrapper[4767]: Trace[1188350729]: ---"Objects listed" error:<nil> 13181ms (18:30:16.896)
Jan 28 18:30:16 crc kubenswrapper[4767]: Trace[1188350729]: [13.181377169s] [13.181377169s] END
Jan 28 18:30:16 crc kubenswrapper[4767]: I0128 18:30:16.896737 4767 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Jan 28 18:30:16 crc kubenswrapper[4767]: I0128 18:30:16.902066 4767 trace.go:236] Trace[466705682]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 18:30:03.382) (total time: 13519ms):
Jan 28 18:30:16 crc kubenswrapper[4767]: Trace[466705682]: ---"Objects listed" error:<nil> 13519ms (18:30:16.901)
Jan 28 18:30:16 crc kubenswrapper[4767]: Trace[466705682]: [13.519376959s] [13.519376959s] END
Jan 28 18:30:16 crc kubenswrapper[4767]: I0128 18:30:16.902085 4767 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Jan 28 18:30:16 crc kubenswrapper[4767]: I0128 18:30:16.902179 4767 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Jan 28 18:30:16 crc kubenswrapper[4767]: I0128 18:30:16.902715 4767 trace.go:236] Trace[1315068105]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (28-Jan-2026 18:30:03.507) (total time: 13395ms):
Jan 28 18:30:16 crc kubenswrapper[4767]: Trace[1315068105]: ---"Objects listed" error:<nil> 13394ms (18:30:16.902)
Jan 28 18:30:16 crc kubenswrapper[4767]: Trace[1315068105]: [13.395048904s] [13.395048904s] END
Jan 28 18:30:16 crc kubenswrapper[4767]: I0128 18:30:16.902736 4767 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Jan 28 18:30:16 crc kubenswrapper[4767]: I0128 18:30:16.945569 4767 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:57634->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Jan 28 18:30:16 crc kubenswrapper[4767]: I0128 18:30:16.945633 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:57634->192.168.126.11:17697: read: connection reset by peer"
Jan 28 18:30:16 crc kubenswrapper[4767]: I0128 18:30:16.945569 4767 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:57636->192.168.126.11:17697: read: connection reset by peer" start-of-body=
Jan 28 18:30:16 crc kubenswrapper[4767]: I0128 18:30:16.945926 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:57636->192.168.126.11:17697: read: connection reset by peer"
Jan 28 18:30:16 crc kubenswrapper[4767]: I0128 18:30:16.945994 4767 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body=
Jan 28 18:30:16 crc kubenswrapper[4767]: I0128 18:30:16.946036 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.572039 4767 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.573135 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.573164 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.573175 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.573370 4767 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.581500 4767 kubelet_node_status.go:115] "Node was previously registered" node="crc"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.581768 4767 kubelet_node_status.go:79] "Successfully registered node" node="crc"
Jan 28 18:30:17 crc kubenswrapper[4767]: E0128 18:30:17.581789 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="error getting node \"crc\": node \"crc\" not found"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.584758 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.584791 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.584802 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.584826 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.584838 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:17Z","lastTransitionTime":"2026-01-28T18:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 28 18:30:17 crc kubenswrapper[4767]: E0128 18:30:17.598885 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.602077 4767 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.602134 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.602147 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.602165 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.602178 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:17Z","lastTransitionTime":"2026-01-28T18:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:17 crc kubenswrapper[4767]: E0128 18:30:17.614339 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.619371 4767 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.619435 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.619450 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.619472 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.619486 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:17Z","lastTransitionTime":"2026-01-28T18:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:17 crc kubenswrapper[4767]: E0128 18:30:17.628685 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.632561 4767 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.632602 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.632611 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.632628 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.632642 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:17Z","lastTransitionTime":"2026-01-28T18:30:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:17 crc kubenswrapper[4767]: E0128 18:30:17.641887 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:17Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:17 crc kubenswrapper[4767]: E0128 18:30:17.642008 4767 
kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count"
Jan 28 18:30:17 crc kubenswrapper[4767]: E0128 18:30:17.642044 4767 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 18:30:17 crc kubenswrapper[4767]: E0128 18:30:17.742147 4767 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.757371 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 07:56:45.221248513 +0000 UTC
Jan 28 18:30:17 crc kubenswrapper[4767]: E0128 18:30:17.842651 4767 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.899335 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.899930 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.901867 4767 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce" exitCode=255
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.901902 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce"}
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.901950 4767 scope.go:117] "RemoveContainer" containerID="f029f5de4e87fb606a2fd8a33935f97d3b6350990c0302cd9191e8c840984fbd"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.902110 4767 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.903016 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.903057 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.903069 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:17 crc kubenswrapper[4767]: I0128 18:30:17.903684 4767 scope.go:117] "RemoveContainer" containerID="abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce"
Jan 28 18:30:17 crc kubenswrapper[4767]: E0128 18:30:17.903862 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792"
Jan 28 18:30:17 crc kubenswrapper[4767]: E0128 18:30:17.943515 4767 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.043960 4767 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.144878 4767 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.245681 4767 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.346275 4767 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.447141 4767 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.547865 4767 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.648867 4767 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.706315 4767 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.738689 4767 apiserver.go:52] "Watching apiserver"
Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.750827 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.750888 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.750897 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.750910 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.750920 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:18Z","lastTransitionTime":"2026-01-28T18:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.758095 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 22:15:48.782054953 +0000 UTC Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.762736 4767 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.763149 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-machine-config-operator/machine-config-daemon-skvzp","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-ovn-kubernetes/ovnkube-node-mwmbk","openshift-dns/node-resolver-wcxcp","openshift-multus/multus-additional-cni-plugins-snvkm","openshift-multus/multus-hjjlv"] Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.763498 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.763642 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.763654 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.763750 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.763796 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.763880 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.764165 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.764275 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.764297 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-wcxcp" Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.764356 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.764376 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.764443 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.764585 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.764901 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.766129 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.766637 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.767246 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.767433 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.768849 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.768904 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.769088 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.768858 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.769321 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.769512 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.769772 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.769804 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 28 18:30:18 crc 
kubenswrapper[4767]: I0128 18:30:18.769856 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.769961 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.769968 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.770069 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.770233 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.770371 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.772682 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.772910 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.773078 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.773283 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.773429 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.773631 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.774179 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.774187 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.774242 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.774261 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.774313 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.774318 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.774402 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.791892 4767 status_manager.go:875] "Failed 
to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.803746 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.806560 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.817107 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.818243 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.830247 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.839507 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.841863 4767 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.848255 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.853320 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.853465 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.853479 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.853497 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.853550 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:18Z","lastTransitionTime":"2026-01-28T18:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.859521 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.873613 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.892752 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.905658 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.910498 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915089 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915139 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915163 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915179 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915194 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915229 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915252 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915274 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: 
\"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915299 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915319 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915336 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915353 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915367 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915382 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915423 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915439 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915495 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915515 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915531 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915569 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915555 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915654 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915661 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915684 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915695 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915764 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915797 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915885 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915916 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915918 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915942 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915967 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.915990 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916014 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916037 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916060 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916084 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916105 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916131 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916131 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod 
"87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916156 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916180 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916201 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916241 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916265 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916304 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916330 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916351 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916378 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916405 4767 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916474 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916502 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916528 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916589 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916614 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916651 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916706 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916732 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916756 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 
18:30:18.916781 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916804 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916828 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916854 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916881 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916906 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916931 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916957 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916989 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917014 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917038 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917062 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917088 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917112 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917135 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917156 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917178 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917200 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917246 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917290 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 18:30:18 crc 
kubenswrapper[4767]: I0128 18:30:18.917314 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917342 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917369 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917394 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917419 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917442 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917515 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917541 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917571 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917599 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 28 18:30:18 crc 
kubenswrapper[4767]: I0128 18:30:18.917622 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917646 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917669 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917691 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917713 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917737 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917763 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917813 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917838 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917861 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: 
\"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917884 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917908 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917930 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917953 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917979 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918002 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918026 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918050 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918073 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918097 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 28 18:30:18 crc 
kubenswrapper[4767]: I0128 18:30:18.918119 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918143 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918166 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918189 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918231 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918258 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918282 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918307 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918330 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918353 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: 
\"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918377 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918403 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918426 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918468 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918496 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918522 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918550 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918573 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918596 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918622 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod 
\"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918646 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918670 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918695 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918718 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918741 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918765 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918790 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918818 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918840 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918861 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918886 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918911 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918936 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918960 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918985 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919007 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919033 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919055 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919078 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919122 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919148 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919177 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920328 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920363 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920389 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920415 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920470 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920498 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920523 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920546 4767 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920574 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920603 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920629 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920654 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920677 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920703 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920728 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920753 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920776 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: 
\"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920800 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920825 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920851 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920875 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920903 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920928 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920955 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920981 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921008 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921033 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921057 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921082 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921107 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921134 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921158 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921184 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921227 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921255 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921280 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921305 4767 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921394 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921422 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921454 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921479 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921507 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921537 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921563 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921587 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921611 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921638 4767 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921664 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921691 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921717 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921785 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gk46x\" (UniqueName: \"kubernetes.io/projected/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-kube-api-access-gk46x\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921816 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-run-ovn-kubernetes\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921842 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-cni-bin\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921867 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzlbv\" (UniqueName: \"kubernetes.io/projected/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-kube-api-access-dzlbv\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.921894 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f729b63a-09d0-4095-add6-3e40fbd43e1c-proxy-tls\") pod \"machine-config-daemon-skvzp\" (UID: \"f729b63a-09d0-4095-add6-3e40fbd43e1c\") " pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.924820 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-cnibin\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.924853 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-run-netns\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.924878 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-systemd\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.924902 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-etc-openvswitch\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.924923 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-openvswitch\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.924948 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgml5\" (UniqueName: \"kubernetes.io/projected/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-kube-api-access-hgml5\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.924969 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-multus-socket-dir-parent\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.924990 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zs9t5\" (UniqueName: \"kubernetes.io/projected/9f8067a8-acf1-4dcd-bf77-1e1dea881a39-kube-api-access-zs9t5\") pod \"node-resolver-wcxcp\" (UID: \"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\") " pod="openshift-dns/node-resolver-wcxcp" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925013 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-node-log\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: 
I0128 18:30:18.925032 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovnkube-config\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925049 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-system-cni-dir\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925081 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925102 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/9f8067a8-acf1-4dcd-bf77-1e1dea881a39-hosts-file\") pod \"node-resolver-wcxcp\" (UID: \"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\") " pod="openshift-dns/node-resolver-wcxcp" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925070 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925120 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-cni-binary-copy\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925306 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925336 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-os-release\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925354 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-multus-daemon-config\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925371 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-var-lib-openvswitch\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925408 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925583 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925605 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-multus-cni-dir\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925621 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-var-lib-cni-multus\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925638 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-run-multus-certs\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925655 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-log-socket\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925671 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-env-overrides\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925699 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovnkube-script-lib\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925724 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925750 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-cnibin\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925766 4767 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925783 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r6jtp\" (UniqueName: \"kubernetes.io/projected/f729b63a-09d0-4095-add6-3e40fbd43e1c-kube-api-access-r6jtp\") pod \"machine-config-daemon-skvzp\" (UID: \"f729b63a-09d0-4095-add6-3e40fbd43e1c\") " pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925806 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925823 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-var-lib-kubelet\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925840 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-ovn\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925856 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/f729b63a-09d0-4095-add6-3e40fbd43e1c-rootfs\") pod \"machine-config-daemon-skvzp\" (UID: \"f729b63a-09d0-4095-add6-3e40fbd43e1c\") " pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925873 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-run-k8s-cni-cncf-io\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925889 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-run-netns\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925943 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-multus-conf-dir\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " 
pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925975 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-cni-binary-copy\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926000 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-etc-kubernetes\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926019 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-cni-netd\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926040 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926069 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926088 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926109 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926129 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926145 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: 
\"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-hostroot\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926160 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-kubelet\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926175 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-slash\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926197 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916351 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916355 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916379 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916392 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916601 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916605 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916628 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916646 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916863 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916884 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.916907 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917038 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). 
InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917061 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917156 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917371 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917374 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917406 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917545 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917574 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917611 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917688 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917701 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917728 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917868 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917876 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.917959 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918021 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918033 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.918179 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919110 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919141 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919636 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919758 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919805 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919953 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919993 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.919989 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920155 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.932282 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920172 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.934021 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.934097 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.934271 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.934526 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.933923 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.920306 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.924639 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.924852 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.924937 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.924933 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925381 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925475 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925581 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.925662 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926434 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926633 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.927112 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926899 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.927862 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.928038 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.928160 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.928353 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.928394 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.928408 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.928605 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.928762 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.932594 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.932611 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.933410 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.933995 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.934522 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.934687 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.934958 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.935312 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.935410 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.935407 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.935660 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.935771 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.935915 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.936176 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.936706 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.936917 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.937079 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.937217 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.937286 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.937461 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.937782 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.937945 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.938074 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.938265 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.938455 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.938863 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.938871 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.938972 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.939076 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.939111 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.939112 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.939328 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.940643 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.941173 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.941391 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.941692 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.941848 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.941947 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). 
InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.942019 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.942082 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.942141 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.942157 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.942234 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.942331 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.942506 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.942527 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.942611 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.942719 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.943219 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.943255 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.943571 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.943731 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.943940 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.944166 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.944179 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.928962 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.926418 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f729b63a-09d0-4095-add6-3e40fbd43e1c-mcd-auth-proxy-config\") pod \"machine-config-daemon-skvzp\" (UID: \"f729b63a-09d0-4095-add6-3e40fbd43e1c\") " pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.948982 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.949331 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.949496 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.949936 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). 
InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.950280 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.950302 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.950327 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.950390 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.950634 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.950014 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.950822 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951155 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951407 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951433 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951593 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951627 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951615 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951684 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951713 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). 
InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.950958 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951735 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951765 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951802 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-system-cni-dir\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951826 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-var-lib-cni-bin\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951849 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovn-node-metrics-cert\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951796 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.951961 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.952024 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.952049 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.952552 4767 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.952575 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.929393 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.952854 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.952953 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.953164 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.953221 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.953329 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.953174 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.953559 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.953674 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.953831 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.953948 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.954223 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.954201 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.954355 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.954396 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.954419 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.954462 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.954226 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.954589 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.954634 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.955130 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.955186 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.955534 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.952064 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.955866 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.956138 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.956255 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.956872 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.957037 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.957257 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.957460 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.958078 4767 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.958728 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:19.458702304 +0000 UTC m=+25.422885178 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.958786 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.958876 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-systemd-units\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.958920 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-os-release\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.958952 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-tuning-conf-dir\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.959080 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.959588 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.959652 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:30:19.459634692 +0000 UTC m=+25.423817566 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.959909 4767 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.959971 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:19.459962672 +0000 UTC m=+25.424145536 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.960367 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.960391 4767 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.960432 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.960443 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.960457 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.960468 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.960480 4767 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.960491 4767 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: 
\"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.960503 4767 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.960514 4767 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.960522 4767 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.960778 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.962308 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.962809 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.966489 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\"
,\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.967199 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.967260 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.967271 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.967796 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.967825 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:18Z","lastTransitionTime":"2026-01-28T18:30:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.968997 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.969900 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.970347 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.974660 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.974690 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.974706 4767 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.974767 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:19.474747602 +0000 UTC m=+25.438930526 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.977910 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.978554 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.979624 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.979679 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.979976 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.979976 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.981103 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.981128 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.981144 4767 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:18 crc kubenswrapper[4767]: E0128 18:30:18.981201 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:19.481182379 +0000 UTC m=+25.445365383 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.981422 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.983650 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.983733 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:30:18 crc kubenswrapper[4767]: I0128 18:30:18.992253 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.000421 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.001910 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.003899 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.010492 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.015187 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.022231 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.029899 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.038330 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.046096 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062386 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-multus-cni-dir\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062694 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-var-lib-cni-multus\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062704 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-var-lib-cni-multus\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062744 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-multus-cni-dir\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 
18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062807 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-run-multus-certs\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062834 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-log-socket\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062860 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-env-overrides\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062866 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-log-socket\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062881 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovnkube-script-lib\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062903 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062929 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-cnibin\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062958 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.062838 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-run-multus-certs\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063027 4767 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-r6jtp\" (UniqueName: \"kubernetes.io/projected/f729b63a-09d0-4095-add6-3e40fbd43e1c-kube-api-access-r6jtp\") pod \"machine-config-daemon-skvzp\" (UID: \"f729b63a-09d0-4095-add6-3e40fbd43e1c\") " pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063245 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-cnibin\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063662 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-var-lib-kubelet\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063710 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-ovn\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063752 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/f729b63a-09d0-4095-add6-3e40fbd43e1c-rootfs\") pod \"machine-config-daemon-skvzp\" (UID: \"f729b63a-09d0-4095-add6-3e40fbd43e1c\") " pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063778 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-run-k8s-cni-cncf-io\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063804 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-ovn\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063867 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-run-netns\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063877 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/f729b63a-09d0-4095-add6-3e40fbd43e1c-rootfs\") pod \"machine-config-daemon-skvzp\" (UID: \"f729b63a-09d0-4095-add6-3e40fbd43e1c\") " pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063918 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-multus-conf-dir\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063927 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063943 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-run-netns\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063889 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-multus-conf-dir\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.063859 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-var-lib-kubelet\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064023 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-cni-binary-copy\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064047 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-run-k8s-cni-cncf-io\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064053 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064085 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-etc-kubernetes\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064106 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-cni-netd\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064125 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064129 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-etc-kubernetes\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064149 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-cni-netd\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064172 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-hostroot\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064189 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-kubelet\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064196 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064240 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-slash\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064268 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-hostroot\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064270 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-kubelet\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064303 4767 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-slash\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064273 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f729b63a-09d0-4095-add6-3e40fbd43e1c-mcd-auth-proxy-config\") pod \"machine-config-daemon-skvzp\" (UID: \"f729b63a-09d0-4095-add6-3e40fbd43e1c\") " pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064396 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064425 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-system-cni-dir\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064448 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-var-lib-cni-bin\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064474 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovn-node-metrics-cert\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064525 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-systemd-units\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064549 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-os-release\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064575 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-tuning-conf-dir\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064598 4767 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-gk46x\" (UniqueName: \"kubernetes.io/projected/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-kube-api-access-gk46x\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064619 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-run-ovn-kubernetes\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064641 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-cni-bin\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064664 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzlbv\" (UniqueName: \"kubernetes.io/projected/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-kube-api-access-dzlbv\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064690 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f729b63a-09d0-4095-add6-3e40fbd43e1c-proxy-tls\") pod \"machine-config-daemon-skvzp\" (UID: \"f729b63a-09d0-4095-add6-3e40fbd43e1c\") " pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064713 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-cnibin\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064735 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-run-netns\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064738 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-env-overrides\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064755 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-systemd\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064779 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-etc-openvswitch\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064800 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-openvswitch\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064821 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgml5\" (UniqueName: \"kubernetes.io/projected/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-kube-api-access-hgml5\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064848 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-multus-socket-dir-parent\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064869 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zs9t5\" (UniqueName: \"kubernetes.io/projected/9f8067a8-acf1-4dcd-bf77-1e1dea881a39-kube-api-access-zs9t5\") pod \"node-resolver-wcxcp\" (UID: \"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\") " pod="openshift-dns/node-resolver-wcxcp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064891 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-node-log\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064913 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064953 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovnkube-config\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064968 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-cni-binary-copy\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064979 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-system-cni-dir\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064987 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-system-cni-dir\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.064908 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb
829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065019 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-host-var-lib-cni-bin\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065048 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-systemd-units\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065052 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/9f8067a8-acf1-4dcd-bf77-1e1dea881a39-hosts-file\") pod \"node-resolver-wcxcp\" (UID: \"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\") " pod="openshift-dns/node-resolver-wcxcp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065090 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-os-release\") pod 
\"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065115 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-etc-openvswitch\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065014 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/9f8067a8-acf1-4dcd-bf77-1e1dea881a39-hosts-file\") pod \"node-resolver-wcxcp\" (UID: \"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\") " pod="openshift-dns/node-resolver-wcxcp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065174 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-cni-binary-copy\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065193 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-os-release\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065229 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-multus-daemon-config\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065245 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-var-lib-openvswitch\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065340 4767 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065351 4767 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065361 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065371 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 
18:30:19.065379 4767 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065399 4767 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065408 4767 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065416 4767 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065424 4767 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065433 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065442 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065450 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065459 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065470 4767 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065479 4767 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065488 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065497 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065506 
4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065515 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065523 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065533 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065543 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065554 4767 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065563 4767 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065563 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-tuning-conf-dir\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065574 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-cni-bin\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065590 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-var-lib-openvswitch\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065572 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065612 4767 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" 
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065614 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-run-ovn-kubernetes\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065621 4767 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065640 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-cnibin\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065650 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065662 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-run-netns\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065669 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065683 4767 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065700 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065696 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovnkube-script-lib\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065713 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065759 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065764 4767 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-os-release\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065775 4767 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065789 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065802 4767 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065814 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065827 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065839 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065851 4767 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065863 4767 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065875 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065887 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065898 4767 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065909 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc 
kubenswrapper[4767]: I0128 18:30:19.065920 4767 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065930 4767 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065941 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065946 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-openvswitch\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065955 4767 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066000 4767 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066011 4767 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066831 4767 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066846 4767 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066858 4767 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066873 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066885 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066897 4767 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" 
DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066909 4767 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066923 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066935 4767 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066953 4767 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066965 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066975 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066988 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067000 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067012 4767 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067025 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067038 4767 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067050 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067061 4767 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc 
kubenswrapper[4767]: I0128 18:30:19.067073 4767 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065684 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-systemd\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067086 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066188 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-multus-socket-dir-parent\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067100 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.065785 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f729b63a-09d0-4095-add6-3e40fbd43e1c-mcd-auth-proxy-config\") pod \"machine-config-daemon-skvzp\" (UID: \"f729b63a-09d0-4095-add6-3e40fbd43e1c\") " pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067112 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066271 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-system-cni-dir\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066749 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovnkube-config\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067147 4767 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.066302 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-node-log\") pod \"ovnkube-node-mwmbk\" (UID: 
\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067166 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067180 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067193 4767 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067223 4767 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067236 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067248 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067258 4767 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067270 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067282 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067293 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067303 4767 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067314 4767 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067326 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 28 
18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067337 4767 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067348 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067360 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067371 4767 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067383 4767 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067396 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067410 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067422 4767 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067435 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067447 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067458 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067473 4767 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067484 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc 
kubenswrapper[4767]: I0128 18:30:19.067496 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067509 4767 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067521 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067533 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067544 4767 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067545 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-cni-binary-copy\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067555 4767 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067664 4767 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067677 4767 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067688 4767 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067700 4767 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067711 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067713 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-multus-daemon-config\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067722 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067760 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067774 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067788 4767 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067799 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067821 4767 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067837 4767 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067849 4767 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067862 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067874 4767 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067886 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067898 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 
crc kubenswrapper[4767]: I0128 18:30:19.067912 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067924 4767 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067934 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067946 4767 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067958 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067971 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067983 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.067995 4767 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068006 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068069 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068082 4767 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068094 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068104 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" 
DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068115 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068150 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068161 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068175 4767 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068190 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068234 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068252 4767 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068264 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068276 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068287 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068323 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068364 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068402 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: 
\"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068417 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068427 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068438 4767 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068450 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068489 4767 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068504 4767 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068519 4767 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068530 4767 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068541 4767 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068580 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068591 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068602 4767 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068614 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068651 4767 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068664 4767 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068676 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068688 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068700 4767 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068735 4767 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068748 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068761 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068772 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068784 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068824 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068840 4767 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068851 4767 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node 
\"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068861 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068873 4767 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068909 4767 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068921 4767 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068931 4767 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068987 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.068999 4767 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.069551 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.069580 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.069601 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.069618 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.069629 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:19Z","lastTransitionTime":"2026-01-28T18:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.069827 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/f729b63a-09d0-4095-add6-3e40fbd43e1c-proxy-tls\") pod \"machine-config-daemon-skvzp\" (UID: \"f729b63a-09d0-4095-add6-3e40fbd43e1c\") " pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.072264 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovn-node-metrics-cert\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.077910 4767 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.081550 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.082548 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.087003 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgml5\" (UniqueName: \"kubernetes.io/projected/688e15c3-4cd8-41ee-a2c4-f1b31bad4afe-kube-api-access-hgml5\") pod \"multus-additional-cni-plugins-snvkm\" (UID: \"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\") " pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.087352 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gk46x\" (UniqueName: \"kubernetes.io/projected/5a8e6ea7-4d55-4222-840b-c0383a9bc7da-kube-api-access-gk46x\") pod \"multus-hjjlv\" (UID: \"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\") " pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.090054 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.090359 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zs9t5\" (UniqueName: \"kubernetes.io/projected/9f8067a8-acf1-4dcd-bf77-1e1dea881a39-kube-api-access-zs9t5\") pod \"node-resolver-wcxcp\" (UID: \"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\") " pod="openshift-dns/node-resolver-wcxcp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.090373 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzlbv\" (UniqueName: \"kubernetes.io/projected/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-kube-api-access-dzlbv\") pod \"ovnkube-node-mwmbk\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.090874 4767 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.093317 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r6jtp\" (UniqueName: \"kubernetes.io/projected/f729b63a-09d0-4095-add6-3e40fbd43e1c-kube-api-access-r6jtp\") pod \"machine-config-daemon-skvzp\" (UID: \"f729b63a-09d0-4095-add6-3e40fbd43e1c\") " pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.094130 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: W0128 18:30:19.099437 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-eed8d05c7c441708de9e0c9fefa8de4f30a5f8f361d15c284049acaae363e716 WatchSource:0}: Error finding container eed8d05c7c441708de9e0c9fefa8de4f30a5f8f361d15c284049acaae363e716: Status 404 returned error can't find the container with id eed8d05c7c441708de9e0c9fefa8de4f30a5f8f361d15c284049acaae363e716 Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.102232 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-wcxcp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.105881 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.108960 4767 csr.go:261] certificate signing request csr-rtwkl is approved, waiting to be issued Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.110085 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.118709 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.125918 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-hjjlv" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.126172 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154e
dc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\
\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.129468 4767 csr.go:257] certificate signing request csr-rtwkl is issued Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.131199 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.137813 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-snvkm" Jan 28 18:30:19 crc kubenswrapper[4767]: W0128 18:30:19.153860 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-5e4f3045a8aae9a3815a3b99a35147e7a02d17e686172363251ca1c72ea960ca WatchSource:0}: Error finding container 5e4f3045a8aae9a3815a3b99a35147e7a02d17e686172363251ca1c72ea960ca: Status 404 returned error can't find the container with id 5e4f3045a8aae9a3815a3b99a35147e7a02d17e686172363251ca1c72ea960ca Jan 28 18:30:19 crc kubenswrapper[4767]: W0128 18:30:19.169029 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf729b63a_09d0_4095_add6_3e40fbd43e1c.slice/crio-00cee3710719ce63018c1ada9272c9e0bb183e36d276291900e71434dc1364d9 WatchSource:0}: Error finding container 00cee3710719ce63018c1ada9272c9e0bb183e36d276291900e71434dc1364d9: Status 404 returned error can't find the container with id 00cee3710719ce63018c1ada9272c9e0bb183e36d276291900e71434dc1364d9 Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.173556 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.173602 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.173615 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.173633 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.173644 4767 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:19Z","lastTransitionTime":"2026-01-28T18:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.281678 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.282123 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.282137 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.282178 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.282193 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:19Z","lastTransitionTime":"2026-01-28T18:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.384170 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.384520 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.384543 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.384564 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.384577 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:19Z","lastTransitionTime":"2026-01-28T18:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.472904 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.472932 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:30:20.472914177 +0000 UTC m=+26.437097041 (durationBeforeRetry 1s). 
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.473090 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.473170 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.473266 4767 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.473307 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:20.473297438 +0000 UTC m=+26.437480312 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.473320 4767 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.473386 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:20.473370331 +0000 UTC m=+26.437553205 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
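
"object ... not registered" here is kubelet-local state, not a statement about etcd: the kubelet's configmap and secret managers only serve objects for pods they have (re)registered, and during this startup window the networking-console-plugin pod is not registered yet, so both SetUp calls fail and back off. A hedged client-go sketch (kubeconfig path assumed) to confirm the objects do exist server-side while the kubelet keeps retrying:

// object_check.go - diagnostic sketch, not part of the kubelet.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/kubeconfig") // assumed path
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)
	ns := "openshift-network-console"
	if _, err := cs.CoreV1().ConfigMaps(ns).Get(context.TODO(), "networking-console-plugin", metav1.GetOptions{}); err != nil {
		fmt.Println("configmap:", err)
	} else {
		fmt.Println("configmap networking-console-plugin exists in the API")
	}
	if _, err := cs.CoreV1().Secrets(ns).Get(context.TODO(), "networking-console-plugin-cert", metav1.GetOptions{}); err != nil {
		fmt.Println("secret:", err)
	} else {
		fmt.Println("secret networking-console-plugin-cert exists in the API")
	}
}
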
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.487189 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.487271 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.487283 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.487299 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.487309 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:19Z","lastTransitionTime":"2026-01-28T18:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.574274 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.574337 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.574501 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.574523 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.574536 4767 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.574553 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.574589 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
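
The failing kube-api-access-* volumes are projected volumes: a bound service account token plus the kube-root-ca.crt configmap (and, per the errors above, openshift-service-ca.crt on this cluster), all rendered into one directory. The mount cannot complete until every projected source resolves, which is why both configmap lookups must succeed first. A sketch of roughly that shape using the corev1 types (field values are illustrative, not read from this cluster; the real volume also projects the pod namespace via the downward API):

// projected_sketch.go - approximates the shape of a kube-api-access-* volume.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	expiry := int64(3607) // illustrative token lifetime
	src := corev1.ProjectedVolumeSource{
		Sources: []corev1.VolumeProjection{
			{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{Path: "token", ExpirationSeconds: &expiry}},
			{ConfigMap: &corev1.ConfigMapProjection{
				LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
				Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
			}},
			{ConfigMap: &corev1.ConfigMapProjection{
				LocalObjectReference: corev1.LocalObjectReference{Name: "openshift-service-ca.crt"},
				Items:                []corev1.KeyToPath{{Key: "service-ca.crt", Path: "service-ca.crt"}},
			}},
			{DownwardAPI: &corev1.DownwardAPIProjection{
				Items: []corev1.DownwardAPIVolumeFile{{Path: "namespace",
					FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}}},
			}},
		},
	}
	fmt.Printf("%d projected sources\n", len(src.Sources))
}
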
Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.574600 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:20.574582682 +0000 UTC m=+26.538765556 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.574601 4767 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.574645 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:20.574635313 +0000 UTC m=+26.538818187 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.589682 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.589716 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.589724 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.589739 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.589748 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:19Z","lastTransitionTime":"2026-01-28T18:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
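
Every repetition of the "Node became not ready" condition in this log carries the same runtime message: the CNI config directory is still empty, and it stays that way until ovnkube-node writes its config. A rough stand-in for that readiness check (the real check lives in the container runtime's CNI handling, not in this exact form, and the accepted file extensions here are an assumption):

// cni_check.go - report whether any CNI config file is present.
package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d" // path from the log message
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("NetworkReady=false:", err)
		return
	}
	for _, e := range entries {
		n := e.Name()
		if strings.HasSuffix(n, ".conf") || strings.HasSuffix(n, ".conflist") || strings.HasSuffix(n, ".json") {
			fmt.Println("NetworkReady=true, found", n)
			return
		}
	}
	fmt.Println("NetworkReady=false: no CNI configuration file in", dir)
}
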
Has your network provider started?"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.691810 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.691839 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.691847 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.691861 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.691870 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:19Z","lastTransitionTime":"2026-01-28T18:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.759120 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 08:49:45.837678771 +0000 UTC Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.794511 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.794572 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.794584 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.794605 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.794644 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:19Z","lastTransitionTime":"2026-01-28T18:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.896512 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.896549 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.896558 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.896570 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.896580 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:19Z","lastTransitionTime":"2026-01-28T18:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.911341 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.911397 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"eed8d05c7c441708de9e0c9fefa8de4f30a5f8f361d15c284049acaae363e716"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.912517 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" event={"ID":"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe","Type":"ContainerStarted","Data":"8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.912549 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" event={"ID":"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe","Type":"ContainerStarted","Data":"87e4dbf1f3c74435c15977da1c6e2511ca8cad4a1c7b7d086d2569dc7f1de39d"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.913874 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.913911 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.913923 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" 
event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"5e4f3045a8aae9a3815a3b99a35147e7a02d17e686172363251ca1c72ea960ca"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.915262 4767 generic.go:334] "Generic (PLEG): container finished" podID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerID="6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c" exitCode=0 Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.915306 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.915325 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerStarted","Data":"54c8264bb62cb6c7eb8784cd67fc571df58f43d61b15a28e7c3c17e684a4901b"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.921398 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hjjlv" event={"ID":"5a8e6ea7-4d55-4222-840b-c0383a9bc7da","Type":"ContainerStarted","Data":"4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.921567 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hjjlv" event={"ID":"5a8e6ea7-4d55-4222-840b-c0383a9bc7da","Type":"ContainerStarted","Data":"5ab45ea25dbd806dbb9cb0baf9d585ec8a282031885b340210505db1044b77bc"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.924349 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.924390 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.924404 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"00cee3710719ce63018c1ada9272c9e0bb183e36d276291900e71434dc1364d9"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.925593 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.926500 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-wcxcp" event={"ID":"9f8067a8-acf1-4dcd-bf77-1e1dea881a39","Type":"ContainerStarted","Data":"342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.926541 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-wcxcp" event={"ID":"9f8067a8-acf1-4dcd-bf77-1e1dea881a39","Type":"ContainerStarted","Data":"7e650cb94cbef2e6996678cd57a5007e046a52006dea5553051376ab9a970aa2"} Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.927885 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"5dc659f7d3222dcb4796848285ec8e6e0f139a8661f93bd95f57d57d14a92af2"} Jan 28 18:30:19 crc kubenswrapper[4767]: E0128 18:30:19.938308 4767 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"etcd-crc\" already exists" pod="openshift-etcd/etcd-crc" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.938735 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.954995 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\
\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled
\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.964912 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed 
calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.977513 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9
b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.989687 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.999253 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.999430 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.999502 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.999578 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:19 crc kubenswrapper[4767]: I0128 18:30:19.999679 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:19Z","lastTransitionTime":"2026-01-28T18:30:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.000128 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.010446 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.021250 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.028464 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.045883 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.059183 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: 
Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.067869 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.079535 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins 
bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\
\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod 
\"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.090450 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.101486 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.101521 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.101530 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.101543 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.101553 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:20Z","lastTransitionTime":"2026-01-28T18:30:20Z","reason":"KubeletNotReady","message":"container runtime 
network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.101649 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.110430 4767 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.126319 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.132081 4767 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-28 18:25:19 +0000 UTC, rotation deadline is 2026-12-18 15:42:35.908972505 +0000 UTC Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.132146 4767 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7773h12m15.776830792s for next certificate rotation Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.138533 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.150050 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.159450 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\
\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.170022 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.179294 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-v
ar-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.194971 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.203353 
4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.203386 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.203395 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.203409 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.203418 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:20Z","lastTransitionTime":"2026-01-28T18:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.305615 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.305932 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.305944 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.305963 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.305974 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:20Z","lastTransitionTime":"2026-01-28T18:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.408953 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.408997 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.409010 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.409029 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.409043 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:20Z","lastTransitionTime":"2026-01-28T18:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.482831 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.482927 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.482970 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.483066 4767 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.483098 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:30:22.483045205 +0000 UTC m=+28.447228079 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.483172 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:22.483154178 +0000 UTC m=+28.447337082 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.483316 4767 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.483396 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:22.483381155 +0000 UTC m=+28.447564069 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.493172 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-28qmq"] Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.493677 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-28qmq" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.495568 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.495922 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.496888 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.496893 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.509584 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.510966 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.511003 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.511015 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.511032 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.511045 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:20Z","lastTransitionTime":"2026-01-28T18:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.526172 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.534423 
4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.552309 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.559958 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\
\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.574999 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"}
,{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.584292 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwrf8\" (UniqueName: \"kubernetes.io/projected/7537ad70-081f-465c-bead-4f4e288c8405-kube-api-access-hwrf8\") pod \"node-ca-28qmq\" (UID: 
\"7537ad70-081f-465c-bead-4f4e288c8405\") " pod="openshift-image-registry/node-ca-28qmq" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.584343 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7537ad70-081f-465c-bead-4f4e288c8405-host\") pod \"node-ca-28qmq\" (UID: \"7537ad70-081f-465c-bead-4f4e288c8405\") " pod="openshift-image-registry/node-ca-28qmq" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.584361 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/7537ad70-081f-465c-bead-4f4e288c8405-serviceca\") pod \"node-ca-28qmq\" (UID: \"7537ad70-081f-465c-bead-4f4e288c8405\") " pod="openshift-image-registry/node-ca-28qmq" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.584383 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.584504 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.584557 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.584591 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.584603 4767 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.584614 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.584635 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.584648 4767 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.584691 4767 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:22.584677989 +0000 UTC m=+28.548860863 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.584706 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:22.584700159 +0000 UTC m=+28.548883033 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.586271 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.595949 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"
mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.605582 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.615633 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.615669 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.615683 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.615697 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.615707 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:20Z","lastTransitionTime":"2026-01-28T18:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.625588 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.635807 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.646663 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.655112 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\
\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.685905 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7537ad70-081f-465c-bead-4f4e288c8405-host\") pod \"node-ca-28qmq\" (UID: \"7537ad70-081f-465c-bead-4f4e288c8405\") " pod="openshift-image-registry/node-ca-28qmq" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.685942 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/7537ad70-081f-465c-bead-4f4e288c8405-serviceca\") pod \"node-ca-28qmq\" (UID: \"7537ad70-081f-465c-bead-4f4e288c8405\") " pod="openshift-image-registry/node-ca-28qmq" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.685991 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwrf8\" (UniqueName: \"kubernetes.io/projected/7537ad70-081f-465c-bead-4f4e288c8405-kube-api-access-hwrf8\") pod \"node-ca-28qmq\" (UID: \"7537ad70-081f-465c-bead-4f4e288c8405\") " pod="openshift-image-registry/node-ca-28qmq" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.686095 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/7537ad70-081f-465c-bead-4f4e288c8405-host\") pod \"node-ca-28qmq\" (UID: \"7537ad70-081f-465c-bead-4f4e288c8405\") " pod="openshift-image-registry/node-ca-28qmq" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.687258 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/7537ad70-081f-465c-bead-4f4e288c8405-serviceca\") pod \"node-ca-28qmq\" (UID: \"7537ad70-081f-465c-bead-4f4e288c8405\") " pod="openshift-image-registry/node-ca-28qmq" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.704626 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwrf8\" (UniqueName: \"kubernetes.io/projected/7537ad70-081f-465c-bead-4f4e288c8405-kube-api-access-hwrf8\") pod \"node-ca-28qmq\" (UID: \"7537ad70-081f-465c-bead-4f4e288c8405\") " pod="openshift-image-registry/node-ca-28qmq" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.719599 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.719662 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.719679 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.719705 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.719722 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:20Z","lastTransitionTime":"2026-01-28T18:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.759704 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 21:16:50.855474208 +0000 UTC Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.795590 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.795608 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.795744 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.795859 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.796078 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:20 crc kubenswrapper[4767]: E0128 18:30:20.796247 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.800538 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.801552 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.802609 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.803559 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.804450 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.804864 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-28qmq" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.805256 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.806176 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.807039 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.809388 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.810502 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.811316 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.812252 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.813116 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" 
Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.813884 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.814540 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.815145 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.817371 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.817973 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.818690 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.819539 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.820232 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.820893 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.821618 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.821639 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.821649 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.821665 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.821675 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:20Z","lastTransitionTime":"2026-01-28T18:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.822820 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.823550 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.824475 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.825468 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.826312 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.826919 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.827774 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.828765 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.829315 4767 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.829419 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.831501 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.832014 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.832470 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.834382 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" 
path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.835028 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.835584 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.836634 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.837646 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.838124 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.838749 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.839790 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.840759 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.841229 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.842097 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.845910 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.848156 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.848833 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.849883 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" 
path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.850543 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.851305 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.852504 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.853150 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.924241 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.924274 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.924282 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.924299 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.924309 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:20Z","lastTransitionTime":"2026-01-28T18:30:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.931100 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-28qmq" event={"ID":"7537ad70-081f-465c-bead-4f4e288c8405","Type":"ContainerStarted","Data":"3d6abe4cdc954697b6f82bb557afd4379db4401ee163a34d0d594cbcb46de2b3"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.933343 4767 generic.go:334] "Generic (PLEG): container finished" podID="688e15c3-4cd8-41ee-a2c4-f1b31bad4afe" containerID="8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a" exitCode=0 Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.933460 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" event={"ID":"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe","Type":"ContainerDied","Data":"8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.939680 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerStarted","Data":"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.939842 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerStarted","Data":"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.939861 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerStarted","Data":"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.939900 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerStarted","Data":"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8"} Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.950689 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overr
ides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"n
ame\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 
127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.957878 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.967714 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.977631 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 28 18:30:20 crc kubenswrapper[4767]: I0128 18:30:20.994833 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\
"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\"
,\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:20Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.005813 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:21Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.019956 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:21Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.026734 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.026762 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.026770 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.026784 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.026793 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:21Z","lastTransitionTime":"2026-01-28T18:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.033979 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:21Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.046906 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:21Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.063040 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:21Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.075582 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:21Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.089332 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-da
emon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:21Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.108452 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready
\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{
\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:21Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.128626 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.128663 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.128678 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.128694 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.128704 4767 setters.go:603] "Node 
became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:21Z","lastTransitionTime":"2026-01-28T18:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.230902 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.231217 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.231229 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.231243 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.231254 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:21Z","lastTransitionTime":"2026-01-28T18:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.333741 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.333782 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.333792 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.333808 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.333821 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:21Z","lastTransitionTime":"2026-01-28T18:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.436739 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.436774 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.436784 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.436798 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.436809 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:21Z","lastTransitionTime":"2026-01-28T18:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.539198 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.539251 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.539260 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.539274 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.539291 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:21Z","lastTransitionTime":"2026-01-28T18:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.641890 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.641930 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.641940 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.641956 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.641967 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:21Z","lastTransitionTime":"2026-01-28T18:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.744019 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.744269 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.744373 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.744449 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.744510 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:21Z","lastTransitionTime":"2026-01-28T18:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.760408 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-22 13:29:14.066566051 +0000 UTC Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.846539 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.846802 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.846899 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.847004 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.847080 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:21Z","lastTransitionTime":"2026-01-28T18:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.943618 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.945723 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" event={"ID":"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe","Type":"ContainerStarted","Data":"f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.951313 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.951349 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.951361 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.951378 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.951392 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:21Z","lastTransitionTime":"2026-01-28T18:30:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.956811 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerStarted","Data":"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.956884 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerStarted","Data":"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.960000 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-28qmq" event={"ID":"7537ad70-081f-465c-bead-4f4e288c8405","Type":"ContainerStarted","Data":"5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343"} Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.960365 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-28T18:30:21Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.976174 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":
\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\"
,\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:21Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:21 crc kubenswrapper[4767]: I0128 18:30:21.989136 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:21Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.001241 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:21Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.015276 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.034250 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.051905 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.053072 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.053112 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.053120 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.053136 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.053145 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:22Z","lastTransitionTime":"2026-01-28T18:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.065828 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.078148 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.089798 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.102736 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.120674 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z 
is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.131695 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.145055 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\"
:\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.155302 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.155333 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.155341 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.155356 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.155341 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.155367 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:22Z","lastTransitionTime":"2026-01-28T18:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.166801 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.178330 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.189809 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.201979 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.212399 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.223249 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-da
emon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.242472 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready
\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{
\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.255434 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.257269 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.257380 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.257585 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 
18:30:22.257856 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.258084 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:22Z","lastTransitionTime":"2026-01-28T18:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.261186 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.264981 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z 
is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.266685 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.276423 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.289320 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.300466 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.310900 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.324518 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.340665 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.352343 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.361377 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.361438 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.361450 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.361474 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.361490 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:22Z","lastTransitionTime":"2026-01-28T18:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.363252 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.377865 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\
\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\"
:\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.391441 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc
35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.405437 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.418233 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.433453 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.451546 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.465341 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.465384 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.465394 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.465412 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.465424 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:22Z","lastTransitionTime":"2026-01-28T18:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.466679 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.479890 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.493574 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.502977 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.503088 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.503155 4767 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.503165 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:30:26.503136567 +0000 UTC m=+32.467319441 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.503237 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:26.503188388 +0000 UTC m=+32.467371262 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.503343 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.503540 4767 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.503613 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:26.50360287 +0000 UTC m=+32.467785814 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.604916 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.605179 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.605422 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.605553 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.605644 4767 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.605771 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:26.60575272 +0000 UTC m=+32.569935594 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.606484 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.606577 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.606649 4767 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.606747 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:26.60673443 +0000 UTC m=+32.570917314 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.680430 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.680468 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.680481 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.680498 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.680511 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:22Z","lastTransitionTime":"2026-01-28T18:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.761268 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 19:26:02.266222832 +0000 UTC
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.782501 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.782542 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.782554 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.782570 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.782582 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:22Z","lastTransitionTime":"2026-01-28T18:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.794933 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.794950 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.794966 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.795393 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.795490 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 18:30:22 crc kubenswrapper[4767]: E0128 18:30:22.795554 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.884844 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.885057 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.885117 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.885184 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.885305 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:22Z","lastTransitionTime":"2026-01-28T18:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.964280 4767 generic.go:334] "Generic (PLEG): container finished" podID="688e15c3-4cd8-41ee-a2c4-f1b31bad4afe" containerID="f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9" exitCode=0
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.964543 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" event={"ID":"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe","Type":"ContainerDied","Data":"f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9"}
Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.975853 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.986763 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.986930 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.987000 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.987092 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.987152 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:22Z","lastTransitionTime":"2026-01-28T18:30:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:22 crc kubenswrapper[4767]: I0128 18:30:22.992629 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled
\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:22Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.005039 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
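The payload bodies in these "Failed to update status for pod" records are strategic-merge patches against each pod's status. Where a $setElementOrder/conditions directive appears, it tells the API server the relative order in which the condition entries, merged by their "type" key, should end up in status.conditions. A small sketch of building a patch of the same shape with the standard library; the condition values are abbreviated for illustration.

package main

import (
	"encoding/json"
	"fmt"
)

// Builds a strategic-merge patch like the ones the kubelet logs here:
// "$setElementOrder/conditions" fixes the ordering of the entries that
// get merged into status.conditions, keyed by "type".
func main() {
	patch := map[string]any{
		"status": map[string]any{
			"$setElementOrder/conditions": []map[string]string{
				{"type": "PodReadyToStartContainers"},
				{"type": "Initialized"},
				{"type": "Ready"},
				{"type": "ContainersReady"},
				{"type": "PodScheduled"},
			},
			"conditions": []map[string]any{
				// Abbreviated example entry; real patches carry the
				// full condition fields seen in the log records.
				{"type": "Ready", "status": "True", "lastTransitionTime": "2026-01-28T18:30:22Z"},
			},
		},
	}
	b, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}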
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.023742 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.035415 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.048019 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.067877 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
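Every one of these patch attempts fails for the same reason: before admitting the status patch, the API server calls the pod.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743, and the TLS handshake is rejected because the webhook's serving certificate expired on 2025-08-24 while the node clock reads 2026-01-28. The check behind "x509: certificate has expired or is not yet valid" is a plain validity-window comparison, sketched below; the PEM file path is an illustrative assumption.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

// Reproduces the validity check that yields this class of TLS error:
// a certificate is unusable whenever the current time falls outside
// its [NotBefore, NotAfter] window.
func main() {
	data, err := os.ReadFile("webhook-serving-cert.pem") // illustrative path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	now := time.Now()
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("certificate has expired or is not yet valid: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
		return
	}
	fmt.Println("certificate is within its validity window")
}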
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.079838 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.089488 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.089525 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.089533 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.089548 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.089558 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:23Z","lastTransitionTime":"2026-01-28T18:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.094147 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.105381 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.119925 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.135459 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.154850 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z 
is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.165423 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.191843 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.191894 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.191905 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.191924 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.191936 4767 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:23Z","lastTransitionTime":"2026-01-28T18:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.294024 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.294063 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.294074 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.294093 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.294105 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:23Z","lastTransitionTime":"2026-01-28T18:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.396705 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.396747 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.396757 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.396774 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.396794 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:23Z","lastTransitionTime":"2026-01-28T18:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.499126 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.499166 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.499177 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.499195 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.499222 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:23Z","lastTransitionTime":"2026-01-28T18:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.601154 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.601189 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.601198 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.601229 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.601239 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:23Z","lastTransitionTime":"2026-01-28T18:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.703856 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.703909 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.703921 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.703941 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.703954 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:23Z","lastTransitionTime":"2026-01-28T18:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.762271 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 12:09:08.315994851 +0000 UTC Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.806168 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.806226 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.806236 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.806251 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.806262 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:23Z","lastTransitionTime":"2026-01-28T18:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.909247 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.909287 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.909298 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.909315 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.909326 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:23Z","lastTransitionTime":"2026-01-28T18:30:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.971771 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerStarted","Data":"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d"} Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.973781 4767 generic.go:334] "Generic (PLEG): container finished" podID="688e15c3-4cd8-41ee-a2c4-f1b31bad4afe" containerID="c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d" exitCode=0 Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.973820 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" event={"ID":"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe","Type":"ContainerDied","Data":"c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d"} Jan 28 18:30:23 crc kubenswrapper[4767]: I0128 18:30:23.998637 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:23Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.011324 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.011359 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.011371 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.011391 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.011404 4767 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:24Z","lastTransitionTime":"2026-01-28T18:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.013584 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.029569 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.041552 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.053446 4767 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.064155 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.081804 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\"
,\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\
\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.091678 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.102008 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.113136 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.113176 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.113189 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.113295 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.113311 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:24Z","lastTransitionTime":"2026-01-28T18:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.118595 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mount
Path\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readO
nly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.131301 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.143429 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.156558 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.167196 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.215507 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.215724 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.215811 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.215881 4767 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.215938 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:24Z","lastTransitionTime":"2026-01-28T18:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.317940 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.317975 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.317984 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.317998 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.318009 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:24Z","lastTransitionTime":"2026-01-28T18:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.420958 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.421001 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.421012 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.421029 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.421039 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:24Z","lastTransitionTime":"2026-01-28T18:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.524421 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.524491 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.524508 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.524540 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.524560 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:24Z","lastTransitionTime":"2026-01-28T18:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.600310 4767 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.627269 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.627325 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.627338 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.627360 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.627374 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:24Z","lastTransitionTime":"2026-01-28T18:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.729699 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.729862 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.729971 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.730069 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.730254 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:24Z","lastTransitionTime":"2026-01-28T18:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.763352 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 09:06:07.750181308 +0000 UTC Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.794949 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.795013 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:24 crc kubenswrapper[4767]: E0128 18:30:24.795236 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.795354 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:24 crc kubenswrapper[4767]: E0128 18:30:24.795487 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:24 crc kubenswrapper[4767]: E0128 18:30:24.795589 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.809781 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.832735 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.832781 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.832793 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.832811 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.832823 4767 setters.go:603] "Node became 
not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:24Z","lastTransitionTime":"2026-01-28T18:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.832924 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.844793 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-
dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.860556 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.870184 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.882425 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.884069 4767 scope.go:117] "RemoveContainer" containerID="abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce" Jan 28 18:30:24 crc kubenswrapper[4767]: E0128 18:30:24.884802 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.885040 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.896474 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.918000 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.930996 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.935073 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.935115 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.935128 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.935150 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.935164 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:24Z","lastTransitionTime":"2026-01-28T18:30:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.944374 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.956354 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.967938 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.980723 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:24Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.982105 4767 generic.go:334] "Generic (PLEG): container finished" podID="688e15c3-4cd8-41ee-a2c4-f1b31bad4afe" containerID="48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad" exitCode=0 Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.982171 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" event={"ID":"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe","Type":"ContainerDied","Data":"48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad"} Jan 28 18:30:24 crc kubenswrapper[4767]: I0128 18:30:24.982684 4767 scope.go:117] "RemoveContainer" containerID="abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce" Jan 28 18:30:24 crc kubenswrapper[4767]: E0128 18:30:24.982830 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.005837 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.018426 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\
\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.035135 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fa
c117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.037793 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.037836 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.037849 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.037866 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.037877 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:25Z","lastTransitionTime":"2026-01-28T18:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.050724 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.065169 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.076659 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.098493 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.110543 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.122277 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.132059 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.140157 4767 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.140193 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.140202 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.140229 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.140239 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:25Z","lastTransitionTime":"2026-01-28T18:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.143607 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.157054 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.175278 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z 
is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.185037 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.202362 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.210921 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.227946 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"w
aiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:25Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.242225 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.242250 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.242262 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.242278 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.242288 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:25Z","lastTransitionTime":"2026-01-28T18:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.344077 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.344109 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.344118 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.344131 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.344140 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:25Z","lastTransitionTime":"2026-01-28T18:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.446312 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.446345 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.446356 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.446372 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.446383 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:25Z","lastTransitionTime":"2026-01-28T18:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.549047 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.549081 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.549092 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.549110 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.549122 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:25Z","lastTransitionTime":"2026-01-28T18:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.651409 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.651452 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.651460 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.651480 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.651489 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:25Z","lastTransitionTime":"2026-01-28T18:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.764750 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.764786 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.764797 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.764813 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.764823 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:25Z","lastTransitionTime":"2026-01-28T18:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.765197 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 14:09:21.476561319 +0000 UTC Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.867128 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.867195 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.867245 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.867312 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.867324 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:25Z","lastTransitionTime":"2026-01-28T18:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.970040 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.970375 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.970386 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.970403 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:25 crc kubenswrapper[4767]: I0128 18:30:25.970415 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:25Z","lastTransitionTime":"2026-01-28T18:30:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.072169 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.072229 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.072242 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.072261 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.072273 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:26Z","lastTransitionTime":"2026-01-28T18:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.174914 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.174962 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.174974 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.174991 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.175022 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:26Z","lastTransitionTime":"2026-01-28T18:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.277358 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.277395 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.277405 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.277421 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.277434 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:26Z","lastTransitionTime":"2026-01-28T18:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.379570 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.379608 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.379619 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.379636 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.379648 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:26Z","lastTransitionTime":"2026-01-28T18:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.482370 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.482406 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.482415 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.482430 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.482440 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:26Z","lastTransitionTime":"2026-01-28T18:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.538961 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.539052 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.539107 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.539185 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:30:34.539152695 +0000 UTC m=+40.503335569 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.539197 4767 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.539292 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:34.539282209 +0000 UTC m=+40.503465083 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.539340 4767 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.539451 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:34.539423583 +0000 UTC m=+40.503606467 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.585004 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.585043 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.585051 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.585068 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.585078 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:26Z","lastTransitionTime":"2026-01-28T18:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.640654 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.640705 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.640864 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.640892 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.640898 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.640928 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.640947 4767 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.641005 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:34.640985625 +0000 UTC m=+40.605168559 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.640906 4767 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.641125 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:34.641105518 +0000 UTC m=+40.605288462 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.687598 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.687631 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.687639 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.687653 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.687661 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:26Z","lastTransitionTime":"2026-01-28T18:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.765811 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 13:02:42.490074599 +0000 UTC Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.789679 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.789721 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.789732 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.789748 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.789760 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:26Z","lastTransitionTime":"2026-01-28T18:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.795010 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.795040 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.795120 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.795131 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.795305 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:26 crc kubenswrapper[4767]: E0128 18:30:26.795413 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.891459 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.891497 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.891510 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.891527 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.891542 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:26Z","lastTransitionTime":"2026-01-28T18:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.992727 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.992754 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.992772 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.992785 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.992799 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:26Z","lastTransitionTime":"2026-01-28T18:30:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.994761 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerStarted","Data":"4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705"} Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.995545 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.995590 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:26 crc kubenswrapper[4767]: I0128 18:30:26.998775 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" event={"ID":"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe","Type":"ContainerStarted","Data":"2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b"} Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.011473 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.022646 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.022915 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.024772 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.033709 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.044181 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.057366 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.068410 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.080010 4767 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c53
3b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.092034 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.094637 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.094671 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.094680 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.094693 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.094702 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.106058 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.119659 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.138161 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount
\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"co
ntainerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.156190 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\
"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.167143 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.182851 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.196554 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.197043 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.197077 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.197089 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.197111 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.197123 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.210288 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.220479 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.238504 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9
8100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.250401 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.262093 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.276251 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.288250 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.299498 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.299538 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.299551 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.299569 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.299580 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.300351 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.313587 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.331807 4767 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.347198 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.356633 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.372807 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.387464 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.401607 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.401659 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.401673 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.401696 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.401712 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.407581 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269c
fd0eab331396b01f04d99705\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.504591 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.504672 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.504692 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.504718 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.504734 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.607637 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.607686 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.607698 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.607717 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.607729 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.710732 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.711019 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.711107 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.711188 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.711332 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.766984 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 18:57:21.246374072 +0000 UTC Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.813719 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.813758 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.813770 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.813788 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.813802 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.899502 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.899767 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.899875 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.899988 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.900086 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 28 18:30:27 crc kubenswrapper[4767]: E0128 18:30:27.911761 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.915756 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.915944 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.916046 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.916151 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.916282 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:27 crc kubenswrapper[4767]: E0128 18:30:27.929481 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.932739 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.932885 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.932980 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.933080 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.933157 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:27 crc kubenswrapper[4767]: E0128 18:30:27.943272 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.946158 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.946195 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.946220 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.946261 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.946272 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:27 crc kubenswrapper[4767]: E0128 18:30:27.956010 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.958536 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.958650 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
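
Each retry above fails at the same point: the API server cannot call the node.network-node-identity.openshift.io admission webhook because the webhook's serving certificate expired on 2025-08-24 while the node clock reads 2026-01-28. The "certificate has expired or is not yet valid" text is Go's crypto/x509 validity-window check, which aborts the TLS handshake before any HTTP request is sent. A minimal sketch of the same check, assuming a hypothetical PEM path for the webhook serving certificate:

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "os"
        "time"
    )

    func main() {
        // Hypothetical path; the real certificate lives in whatever
        // secret the network-node-identity webhook mounts for serving TLS.
        data, err := os.ReadFile("/tmp/webhook-serving.crt")
        if err != nil {
            panic(err)
        }
        block, _ := pem.Decode(data)
        if block == nil {
            panic("no PEM block found")
        }
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            panic(err)
        }
        // The same window comparison that fails in the log.
        now := time.Now()
        switch {
        case now.After(cert.NotAfter):
            fmt.Printf("expired: current time %s is after %s\n",
                now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
        case now.Before(cert.NotBefore):
            fmt.Printf("not yet valid: current time %s is before %s\n",
                now.UTC().Format(time.RFC3339), cert.NotBefore.UTC().Format(time.RFC3339))
        default:
            fmt.Println("certificate is inside its validity window")
        }
    }

Until that certificate is re-issued (or the clock corrected), every node and pod status patch routed through this webhook will keep failing the same way, which is exactly the pattern the rest of this log shows.
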
event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.958780 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.958857 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.958930 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:27 crc kubenswrapper[4767]: E0128 18:30:27.970339 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:27Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:27Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:27 crc kubenswrapper[4767]: E0128 18:30:27.970511 4767 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.976706 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.976752 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.976761 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.976776 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:27 crc kubenswrapper[4767]: I0128 18:30:27.976785 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:27Z","lastTransitionTime":"2026-01-28T18:30:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.001302 4767 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.079476 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.079509 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.079518 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.079532 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.079542 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:28Z","lastTransitionTime":"2026-01-28T18:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.182349 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.182386 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.182395 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.182414 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.182423 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:28Z","lastTransitionTime":"2026-01-28T18:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.285080 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.285156 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.285176 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.285200 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.285258 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:28Z","lastTransitionTime":"2026-01-28T18:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.388439 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.388933 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.388943 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.388960 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.388973 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:28Z","lastTransitionTime":"2026-01-28T18:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.491428 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.491463 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.491475 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.491516 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.491528 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:28Z","lastTransitionTime":"2026-01-28T18:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.593754 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.593797 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.593810 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.593828 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.593840 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:28Z","lastTransitionTime":"2026-01-28T18:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.696182 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.696231 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.696241 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.696255 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.696264 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:28Z","lastTransitionTime":"2026-01-28T18:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.767694 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 03:54:45.517044958 +0000 UTC Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.795138 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.795231 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.795163 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:28 crc kubenswrapper[4767]: E0128 18:30:28.795415 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:28 crc kubenswrapper[4767]: E0128 18:30:28.795522 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:28 crc kubenswrapper[4767]: E0128 18:30:28.795634 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.798838 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.798887 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.798911 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.798956 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.798981 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:28Z","lastTransitionTime":"2026-01-28T18:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.901155 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.901234 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.901250 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.901273 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:28 crc kubenswrapper[4767]: I0128 18:30:28.901288 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:28Z","lastTransitionTime":"2026-01-28T18:30:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.003322 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.003355 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.003365 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.003381 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.003396 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:29Z","lastTransitionTime":"2026-01-28T18:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.007963 4767 generic.go:334] "Generic (PLEG): container finished" podID="688e15c3-4cd8-41ee-a2c4-f1b31bad4afe" containerID="2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b" exitCode=0 Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.008111 4767 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.008322 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" event={"ID":"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe","Type":"ContainerDied","Data":"2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b"} Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.024454 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z"
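
The kube-apiserver-check-endpoints container in the payload above is already in CrashLoopBackOff with "back-off 10s restarting failed container". The kubelet's crash-loop back-off starts at 10s and doubles on every subsequent failed restart, capped at 5m by default (it resets once the container has run cleanly for a while), so the wait sequence is deterministic. A sketch of that progression under those default values:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // Kubelet defaults: 10s initial back-off, doubling per failed
        // restart, capped at 5 minutes.
        const initial = 10 * time.Second
        const maxBackoff = 5 * time.Minute
        backoff := initial
        for restart := 1; restart <= 8; restart++ {
            fmt.Printf("after failed restart %d: back-off %s\n", restart, backoff)
            backoff *= 2
            if backoff > maxBackoff {
                backoff = maxBackoff
            }
        }
    }

The crash itself is secondary here: check-endpoints exits with pods \"kube-apiserver-crc\" not found, which is consistent with the static pod's mirror pod never making it into the API while the expired webhook certificate rejects writes.
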
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.052039 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\
\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.066284 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.079330 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.089616 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.102881 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.105948 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.105983 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.105992 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.106007 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.106017 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:29Z","lastTransitionTime":"2026-01-28T18:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.113739 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.125662 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.144953 4767 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.156540 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.167735 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.179865 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.192153 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.207964 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.208005 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.208014 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.208028 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.208038 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:29Z","lastTransitionTime":"2026-01-28T18:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.212897 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269c
fd0eab331396b01f04d99705\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:29Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.310295 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.310362 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.310372 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.310387 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.310398 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:29Z","lastTransitionTime":"2026-01-28T18:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.412927 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.412989 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.413006 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.413026 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.413037 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:29Z","lastTransitionTime":"2026-01-28T18:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.514938 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.514976 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.514988 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.515003 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.515012 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:29Z","lastTransitionTime":"2026-01-28T18:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.618187 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.618238 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.618248 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.618262 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.618271 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:29Z","lastTransitionTime":"2026-01-28T18:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.720707 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.720754 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.720766 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.720786 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.720799 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:29Z","lastTransitionTime":"2026-01-28T18:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.768369 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 06:34:19.057763008 +0000 UTC Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.824005 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.824088 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.824111 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.824136 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.824153 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:29Z","lastTransitionTime":"2026-01-28T18:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.927943 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.927994 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.928006 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.928023 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:29 crc kubenswrapper[4767]: I0128 18:30:29.928034 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:29Z","lastTransitionTime":"2026-01-28T18:30:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.016675 4767 generic.go:334] "Generic (PLEG): container finished" podID="688e15c3-4cd8-41ee-a2c4-f1b31bad4afe" containerID="662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf" exitCode=0 Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.016735 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" event={"ID":"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe","Type":"ContainerDied","Data":"662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf"} Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.030474 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.030532 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.030543 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.030576 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.030589 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:30Z","lastTransitionTime":"2026-01-28T18:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.040588 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.050927 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.065891 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c2
0537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.079890 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.090856 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.106664 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.122426 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.133069 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.133114 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.133125 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.133148 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.133165 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:30Z","lastTransitionTime":"2026-01-28T18:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.137369 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.152463 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.171818 4767 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.188627 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.202168 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.215712 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.232914 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.235709 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.235750 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.235762 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.235782 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.235795 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:30Z","lastTransitionTime":"2026-01-28T18:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.254692 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269c
fd0eab331396b01f04d99705\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.338652 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.338693 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.338704 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.338716 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.338725 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:30Z","lastTransitionTime":"2026-01-28T18:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.403386 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp"] Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.403929 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.406396 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.406738 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.419323 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.430722 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.440920 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.440964 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.440974 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.440991 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.441003 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:30Z","lastTransitionTime":"2026-01-28T18:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.446410 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.465780 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\
\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is 
after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.476413 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/03298ae7-d68a-454d-a610-7e92c936df1a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-xxtqp\" (UID: \"03298ae7-d68a-454d-a610-7e92c936df1a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.476500 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/03298ae7-d68a-454d-a610-7e92c936df1a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-xxtqp\" (UID: \"03298ae7-d68a-454d-a610-7e92c936df1a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.476520 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/03298ae7-d68a-454d-a610-7e92c936df1a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-xxtqp\" (UID: \"03298ae7-d68a-454d-a610-7e92c936df1a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.476554 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bbssx\" (UniqueName: \"kubernetes.io/projected/03298ae7-d68a-454d-a610-7e92c936df1a-kube-api-access-bbssx\") pod \"ovnkube-control-plane-749d76644c-xxtqp\" (UID: \"03298ae7-d68a-454d-a610-7e92c936df1a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.483593 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.503610 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.516876 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.534006 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c2
0537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.544314 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.544366 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.544378 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.544398 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.544410 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:30Z","lastTransitionTime":"2026-01-28T18:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.553968 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.572273 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.577872 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bbssx\" (UniqueName: \"kubernetes.io/projected/03298ae7-d68a-454d-a610-7e92c936df1a-kube-api-access-bbssx\") pod \"ovnkube-control-plane-749d76644c-xxtqp\" (UID: \"03298ae7-d68a-454d-a610-7e92c936df1a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.577984 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/03298ae7-d68a-454d-a610-7e92c936df1a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-xxtqp\" (UID: \"03298ae7-d68a-454d-a610-7e92c936df1a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.578013 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/03298ae7-d68a-454d-a610-7e92c936df1a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-xxtqp\" (UID: \"03298ae7-d68a-454d-a610-7e92c936df1a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.578034 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" 
(UniqueName: \"kubernetes.io/secret/03298ae7-d68a-454d-a610-7e92c936df1a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-xxtqp\" (UID: \"03298ae7-d68a-454d-a610-7e92c936df1a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.578927 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/03298ae7-d68a-454d-a610-7e92c936df1a-env-overrides\") pod \"ovnkube-control-plane-749d76644c-xxtqp\" (UID: \"03298ae7-d68a-454d-a610-7e92c936df1a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.579301 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/03298ae7-d68a-454d-a610-7e92c936df1a-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-xxtqp\" (UID: \"03298ae7-d68a-454d-a610-7e92c936df1a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.586534 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/03298ae7-d68a-454d-a610-7e92c936df1a-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-xxtqp\" (UID: \"03298ae7-d68a-454d-a610-7e92c936df1a\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.594231 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb6
08ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.600475 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bbssx\" (UniqueName: \"kubernetes.io/projected/03298ae7-d68a-454d-a610-7e92c936df1a-kube-api-access-bbssx\") pod \"ovnkube-control-plane-749d76644c-xxtqp\" (UID: \"03298ae7-d68a-454d-a610-7e92c936df1a\") " 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.609110 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.624876 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.644363 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.647494 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.647540 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.647554 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.647577 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.647592 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:30Z","lastTransitionTime":"2026-01-28T18:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.671630 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.697905 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:30Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.716111 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" Jan 28 18:30:30 crc kubenswrapper[4767]: W0128 18:30:30.733625 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod03298ae7_d68a_454d_a610_7e92c936df1a.slice/crio-66efe9581b30af72cf1d4baa89d820e9f159930f028e7856e62ac53ab64b6ef6 WatchSource:0}: Error finding container 66efe9581b30af72cf1d4baa89d820e9f159930f028e7856e62ac53ab64b6ef6: Status 404 returned error can't find the container with id 66efe9581b30af72cf1d4baa89d820e9f159930f028e7856e62ac53ab64b6ef6 Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.750810 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.751242 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.751255 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.751279 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.751293 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:30Z","lastTransitionTime":"2026-01-28T18:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.769127 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 11:50:58.541910353 +0000 UTC Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.794792 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.794859 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.794897 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:30 crc kubenswrapper[4767]: E0128 18:30:30.794987 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:30 crc kubenswrapper[4767]: E0128 18:30:30.795094 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:30 crc kubenswrapper[4767]: E0128 18:30:30.795162 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.855735 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.855783 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.855797 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.855816 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.855827 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:30Z","lastTransitionTime":"2026-01-28T18:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.958981 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.959014 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.959023 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.959041 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:30 crc kubenswrapper[4767]: I0128 18:30:30.959056 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:30Z","lastTransitionTime":"2026-01-28T18:30:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.022388 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" event={"ID":"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe","Type":"ContainerStarted","Data":"5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c"} Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.023173 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" event={"ID":"03298ae7-d68a-454d-a610-7e92c936df1a","Type":"ContainerStarted","Data":"66efe9581b30af72cf1d4baa89d820e9f159930f028e7856e62ac53ab64b6ef6"} Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.041816 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/l
ib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"contai
nerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.055887 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.061861 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.061925 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.061942 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.061965 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.061980 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:31Z","lastTransitionTime":"2026-01-28T18:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.066653 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.078348 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.090266 4767 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.100839 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.123128 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33246
21ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\
\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.136087 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.151244 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.164437 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.164478 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.164490 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.164507 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.164522 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:31Z","lastTransitionTime":"2026-01-28T18:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.182174 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.206644 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.226164 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.246176 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cl
uster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.260773 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.266774 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.266809 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.266820 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.266838 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.266867 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:31Z","lastTransitionTime":"2026-01-28T18:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.275038 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.289982 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:31Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.369342 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.369392 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.369412 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.369438 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.369458 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:31Z","lastTransitionTime":"2026-01-28T18:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.473551 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.473613 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.473627 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.473653 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.473676 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:31Z","lastTransitionTime":"2026-01-28T18:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.576036 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.576068 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.576077 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.576092 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.576101 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:31Z","lastTransitionTime":"2026-01-28T18:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.678561 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.678614 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.678626 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.678645 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.678658 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:31Z","lastTransitionTime":"2026-01-28T18:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.770437 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 16:09:10.40421953 +0000 UTC Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.781889 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.781964 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.781980 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.782007 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.782023 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:31Z","lastTransitionTime":"2026-01-28T18:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.884850 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.884891 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.884901 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.884941 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.884956 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:31Z","lastTransitionTime":"2026-01-28T18:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.987257 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.987313 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.987327 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.987354 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:31 crc kubenswrapper[4767]: I0128 18:30:31.987371 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:31Z","lastTransitionTime":"2026-01-28T18:30:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.028421 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" event={"ID":"03298ae7-d68a-454d-a610-7e92c936df1a","Type":"ContainerStarted","Data":"586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d"} Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.090187 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.090269 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.090281 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.090300 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.090314 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:32Z","lastTransitionTime":"2026-01-28T18:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.193101 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.193161 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.193171 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.193190 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.193224 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:32Z","lastTransitionTime":"2026-01-28T18:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.283626 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-qbch4"] Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.284137 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:32 crc kubenswrapper[4767]: E0128 18:30:32.284230 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.296035 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.296094 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.296113 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.296141 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.296158 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:32Z","lastTransitionTime":"2026-01-28T18:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.297148 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.314065 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.331357 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.365071 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\
"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://
e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.388469 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.397478 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2hnrq\" (UniqueName: \"kubernetes.io/projected/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-kube-api-access-2hnrq\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.398162 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.399447 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.399501 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.399513 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.399531 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.399544 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:32Z","lastTransitionTime":"2026-01-28T18:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.405400 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.417831 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.430154 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.448081 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.460318 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.474817 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.487078 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.499007 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.499143 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2hnrq\" (UniqueName: \"kubernetes.io/projected/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-kube-api-access-2hnrq\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:32 crc kubenswrapper[4767]: E0128 18:30:32.499246 4767 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 18:30:32 crc kubenswrapper[4767]: E0128 18:30:32.499341 4767 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs podName:0dc01d59-d401-4c7c-9eec-0a67aa5261fc nodeName:}" failed. No retries permitted until 2026-01-28 18:30:32.999317022 +0000 UTC m=+38.963499896 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs") pod "network-metrics-daemon-qbch4" (UID: "0dc01d59-d401-4c7c-9eec-0a67aa5261fc") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.503658 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.503722 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.503734 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.503756 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.503769 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:32Z","lastTransitionTime":"2026-01-28T18:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.504468 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.519077 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2hnrq\" (UniqueName: \"kubernetes.io/projected/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-kube-api-access-2hnrq\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.521161 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2
af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.537120 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.549382 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.563722 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:32Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.607093 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.607149 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.607168 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.607194 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.607244 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:32Z","lastTransitionTime":"2026-01-28T18:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.710172 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.710289 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.710315 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.710350 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.710370 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:32Z","lastTransitionTime":"2026-01-28T18:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.771461 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-17 11:17:34.158067222 +0000 UTC
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.795090 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.795108 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.795199 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 18:30:32 crc kubenswrapper[4767]: E0128 18:30:32.795348 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 18:30:32 crc kubenswrapper[4767]: E0128 18:30:32.795547 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 18:30:32 crc kubenswrapper[4767]: E0128 18:30:32.795836 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.813829 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.813932 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.813961 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.814016 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.814043 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:32Z","lastTransitionTime":"2026-01-28T18:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.917006 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.917057 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.917068 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.917093 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:32 crc kubenswrapper[4767]: I0128 18:30:32.917111 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:32Z","lastTransitionTime":"2026-01-28T18:30:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.007898 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4"
Jan 28 18:30:33 crc kubenswrapper[4767]: E0128 18:30:33.008118 4767 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 18:30:33 crc kubenswrapper[4767]: E0128 18:30:33.008238 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs podName:0dc01d59-d401-4c7c-9eec-0a67aa5261fc nodeName:}" failed. No retries permitted until 2026-01-28 18:30:34.008181972 +0000 UTC m=+39.972364856 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs") pod "network-metrics-daemon-qbch4" (UID: "0dc01d59-d401-4c7c-9eec-0a67aa5261fc") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.019930 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.019972 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.019982 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.019994 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.020005 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:33Z","lastTransitionTime":"2026-01-28T18:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.123016 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.123097 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.123112 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.123137 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.123155 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:33Z","lastTransitionTime":"2026-01-28T18:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.226776 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.226823 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.226836 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.226863 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.226877 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:33Z","lastTransitionTime":"2026-01-28T18:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.329966 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.330008 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.330017 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.330034 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.330046 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:33Z","lastTransitionTime":"2026-01-28T18:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.433669 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.433721 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.433733 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.433751 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.433767 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:33Z","lastTransitionTime":"2026-01-28T18:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.537894 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.538004 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.538035 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.538075 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.538101 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:33Z","lastTransitionTime":"2026-01-28T18:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.640609 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.640646 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.640660 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.640679 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.640692 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:33Z","lastTransitionTime":"2026-01-28T18:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.743547 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.743596 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.743609 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.743630 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.743643 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:33Z","lastTransitionTime":"2026-01-28T18:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.772355 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 04:06:54.330933188 +0000 UTC
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.794825 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4"
Jan 28 18:30:33 crc kubenswrapper[4767]: E0128 18:30:33.794973 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.848328 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.848393 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.848406 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.848427 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.848442 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:33Z","lastTransitionTime":"2026-01-28T18:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.952035 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.952078 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.952087 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.952100 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:33 crc kubenswrapper[4767]: I0128 18:30:33.952109 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:33Z","lastTransitionTime":"2026-01-28T18:30:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.020650 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4"
Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.020808 4767 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.020871 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs podName:0dc01d59-d401-4c7c-9eec-0a67aa5261fc nodeName:}" failed. No retries permitted until 2026-01-28 18:30:36.020854948 +0000 UTC m=+41.985037842 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs") pod "network-metrics-daemon-qbch4" (UID: "0dc01d59-d401-4c7c-9eec-0a67aa5261fc") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.039918 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" event={"ID":"03298ae7-d68a-454d-a610-7e92c936df1a","Type":"ContainerStarted","Data":"e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3"}
Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.042879 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovnkube-controller/0.log"
Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.048313 4767 generic.go:334] "Generic (PLEG): container finished" podID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerID="4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705" exitCode=1
Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.048390 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705"}
Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.049112 4767 scope.go:117] "RemoveContainer" containerID="4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705"
Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.058628 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.058661 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.058670 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.058687 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.058696 4767 setters.go:603] "Node became not ready" node="crc"
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:34Z","lastTransitionTime":"2026-01-28T18:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.064394 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.079194 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.100698 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269c
fd0eab331396b01f04d99705\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.113759 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.135115 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.150302 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.161729 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.161814 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.161825 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.161846 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.161862 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:34Z","lastTransitionTime":"2026-01-28T18:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.169825 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c
07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.182865 4767 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.194121 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.208480 4767 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957
b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.225423 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.241370 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.256461 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.271234 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.271413 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.271453 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.271487 4767 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.271508 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:34Z","lastTransitionTime":"2026-01-28T18:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.284714 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce475
2274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.301155 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.317119 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.332871 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-da
emon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.349538 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPat
h\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.367450 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.374598 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.374675 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.374696 4767 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.374725 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.374747 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:34Z","lastTransitionTime":"2026-01-28T18:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.382580 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.402443 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.421379 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.436170 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.450339 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.478044 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.478089 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.478102 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.478120 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.478132 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:34Z","lastTransitionTime":"2026-01-28T18:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.479923 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.500269 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.516140 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.535067 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.554965 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.581589 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.581681 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.581708 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.581745 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.581772 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:34Z","lastTransitionTime":"2026-01-28T18:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.585091 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:33Z\\\",\\\"message\\\":\\\"3158 6016 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0128 18:30:31.783188 6016 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 18:30:31.783241 6016 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 18:30:31.783274 6016 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 18:30:31.783366 6016 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 18:30:31.783411 6016 factory.go:656] Stopping watch factory\\\\nI0128 18:30:31.783452 6016 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 18:30:31.783478 6016 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 18:30:31.783501 6016 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 18:30:31.783530 6016 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 18:30:31.783574 6016 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 18:30:31.783270 6016 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 18:30:31.783636 6016 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 18:30:31.783679 6016 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 18:30:31.783646 6016 reflector.go:311] Stopping reflector *v1.Namespace (0s) from 
k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.604570 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\"
:true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.625841 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.629254 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.629426 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.629519 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" 
(UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.629609 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:30:50.629577298 +0000 UTC m=+56.593760192 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.629614 4767 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.629695 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:50.629683031 +0000 UTC m=+56.593865915 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.629615 4767 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.629745 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:50.629735103 +0000 UTC m=+56.593917987 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.640048 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.663467 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.684653 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.684749 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:34 crc 
kubenswrapper[4767]: I0128 18:30:34.684781 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.684817 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.684846 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:34Z","lastTransitionTime":"2026-01-28T18:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.731262 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.731353 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.731422 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.731433 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.731452 4767 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.731552 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:50.731523461 +0000 UTC m=+56.695706375 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.731569 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.731596 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.731619 4767 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.731678 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 18:30:50.731659726 +0000 UTC m=+56.695842630 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.773300 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 11:34:54.111419038 +0000 UTC Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.787755 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.787836 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.787859 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.787887 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.787911 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:34Z","lastTransitionTime":"2026-01-28T18:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.795043 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.795275 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.795424 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.795483 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.795690 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:34 crc kubenswrapper[4767]: E0128 18:30:34.795915 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.810673 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.846634 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.863054 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.877714 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.893836 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.895612 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.895712 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.895726 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.895747 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.895762 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:34Z","lastTransitionTime":"2026-01-28T18:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.912599 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.931782 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:33Z\\\",\\\"message\\\":\\\"3158 6016 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0128 18:30:31.783188 6016 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 18:30:31.783241 6016 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 18:30:31.783274 6016 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 18:30:31.783366 6016 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 18:30:31.783411 6016 factory.go:656] Stopping watch factory\\\\nI0128 18:30:31.783452 6016 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 18:30:31.783478 6016 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 18:30:31.783501 6016 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 18:30:31.783530 6016 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 18:30:31.783574 6016 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 18:30:31.783270 6016 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 18:30:31.783636 6016 handler.go:208] Removed *v1.Namespace event handler 
1\\\\nI0128 18:30:31.783679 6016 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 18:30:31.783646 6016 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerSt
atuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.944857 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.961427 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.973730 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.987658 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.997995 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.998057 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:34 crc 
kubenswrapper[4767]: I0128 18:30:34.998071 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.998085 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.998094 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:34Z","lastTransitionTime":"2026-01-28T18:30:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:34 crc kubenswrapper[4767]: I0128 18:30:34.999722 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:3
0:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:34Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.012048 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:35Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.022714 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:35Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.036046 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:35Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.053421 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovnkube-controller/0.log" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.053751 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:35Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.056272 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerStarted","Data":"cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94"} Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.070674 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"r
eady\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:35Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.100802 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.100854 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.100866 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.100882 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.100894 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:35Z","lastTransitionTime":"2026-01-28T18:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.203950 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.203989 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.203998 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.204017 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.204028 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:35Z","lastTransitionTime":"2026-01-28T18:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.306505 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.306551 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.306562 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.306581 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.306599 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:35Z","lastTransitionTime":"2026-01-28T18:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.408897 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.408986 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.409001 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.409019 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.409032 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:35Z","lastTransitionTime":"2026-01-28T18:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.511597 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.511640 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.511649 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.511662 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.511671 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:35Z","lastTransitionTime":"2026-01-28T18:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.614082 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.614117 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.614125 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.614138 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.614149 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:35Z","lastTransitionTime":"2026-01-28T18:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.716605 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.716638 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.716647 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.716661 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.716670 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:35Z","lastTransitionTime":"2026-01-28T18:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.774281 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 13:53:09.865054884 +0000 UTC Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.794632 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:35 crc kubenswrapper[4767]: E0128 18:30:35.794796 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.819698 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.819745 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.819756 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.819774 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.819785 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:35Z","lastTransitionTime":"2026-01-28T18:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.922099 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.922140 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.922151 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.922168 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:35 crc kubenswrapper[4767]: I0128 18:30:35.922181 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:35Z","lastTransitionTime":"2026-01-28T18:30:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.024867 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.024908 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.024920 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.024936 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.024948 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:36Z","lastTransitionTime":"2026-01-28T18:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.050883 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:36 crc kubenswrapper[4767]: E0128 18:30:36.051097 4767 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 18:30:36 crc kubenswrapper[4767]: E0128 18:30:36.051269 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs podName:0dc01d59-d401-4c7c-9eec-0a67aa5261fc nodeName:}" failed. No retries permitted until 2026-01-28 18:30:40.051194302 +0000 UTC m=+46.015377216 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs") pod "network-metrics-daemon-qbch4" (UID: "0dc01d59-d401-4c7c-9eec-0a67aa5261fc") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.059815 4767 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.080445 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b8
2799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.098503 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.119162 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.128065 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.128117 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.128187 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.128258 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.128283 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:36Z","lastTransitionTime":"2026-01-28T18:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.135076 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.146680 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.164935 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.181570 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.196048 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.206711 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.222146 4767 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.230595 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.230661 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.230670 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.230688 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.230705 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:36Z","lastTransitionTime":"2026-01-28T18:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.236334 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\
\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.258794 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\
\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"
run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:33Z\\\",\\\"message\\\":\\\"3158 6016 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0128 18:30:31.783188 6016 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 18:30:31.783241 6016 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 18:30:31.783274 6016 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 18:30:31.783366 6016 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 18:30:31.783411 6016 factory.go:656] Stopping watch factory\\\\nI0128 18:30:31.783452 6016 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 18:30:31.783478 6016 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 18:30:31.783501 6016 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 18:30:31.783530 6016 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 18:30:31.783574 6016 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 18:30:31.783270 6016 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 18:30:31.783636 6016 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 18:30:31.783679 6016 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 
18:30:31.783646 6016 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.
168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.273283 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.295875 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.306606 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.321365 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.332963 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.333011 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:36 crc 
kubenswrapper[4767]: I0128 18:30:36.333028 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.333051 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.333068 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:36Z","lastTransitionTime":"2026-01-28T18:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.338657 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:36Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.435571 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.435630 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.435644 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.435660 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.435669 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:36Z","lastTransitionTime":"2026-01-28T18:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.538280 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.538321 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.538333 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.538351 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.538363 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:36Z","lastTransitionTime":"2026-01-28T18:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.641098 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.641127 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.641135 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.641149 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.641157 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:36Z","lastTransitionTime":"2026-01-28T18:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.744001 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.744039 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.744049 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.744065 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.744075 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:36Z","lastTransitionTime":"2026-01-28T18:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.774599 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 06:13:29.904364319 +0000 UTC
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.795007 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.795060 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.795093 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:30:36 crc kubenswrapper[4767]: E0128 18:30:36.795158 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 18:30:36 crc kubenswrapper[4767]: E0128 18:30:36.795297 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 18:30:36 crc kubenswrapper[4767]: E0128 18:30:36.795390 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.846563 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.846609 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.846619 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.846635 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.846646 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:36Z","lastTransitionTime":"2026-01-28T18:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.949624 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.949690 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.949711 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.949734 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:36 crc kubenswrapper[4767]: I0128 18:30:36.949759 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:36Z","lastTransitionTime":"2026-01-28T18:30:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.052826 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.052906 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.052925 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.052950 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.052967 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:37Z","lastTransitionTime":"2026-01-28T18:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.066590 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovnkube-controller/1.log"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.067712 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovnkube-controller/0.log"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.071500 4767 generic.go:334] "Generic (PLEG): container finished" podID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerID="cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94" exitCode=1
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.071555 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94"}
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.071591 4767 scope.go:117] "RemoveContainer" containerID="4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.072656 4767 scope.go:117] "RemoveContainer" containerID="cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94"
Jan 28 18:30:37 crc kubenswrapper[4767]: E0128 18:30:37.073031 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.089920 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.106064 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.128440 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fc
a14d848f136ec09fbef08c94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:33Z\\\",\\\"message\\\":\\\"3158 6016 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0128 18:30:31.783188 6016 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 18:30:31.783241 6016 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 18:30:31.783274 6016 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 18:30:31.783366 6016 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 18:30:31.783411 6016 factory.go:656] Stopping watch factory\\\\nI0128 18:30:31.783452 6016 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 18:30:31.783478 6016 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 18:30:31.783501 6016 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 18:30:31.783530 6016 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 18:30:31.783574 6016 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 18:30:31.783270 6016 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 18:30:31.783636 6016 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 18:30:31.783679 6016 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 18:30:31.783646 6016 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:36Z\\\",\\\"message\\\":\\\"-multus/network-metrics-daemon-qbch4 in node crc\\\\nI0128 18:30:35.794315 6274 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0128 18:30:35.794321 6274 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-wcxcp after 0 failed attempt(s)\\\\nI0128 18:30:35.794335 6274 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-wcxcp\\\\nI0128 18:30:35.794342 6274 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0128 18:30:35.794344 6274 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-qbch4] creating logical port openshift-multus_network-metrics-daemon-qbch4 for pod on switch crc\\\\nI0128 18:30:35.794381 6274 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp\\\\nF0128 18:30:35.794402 6274 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, 
handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3
c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.140993 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\
\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.158721 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.158791 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.158811 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.158837 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.158889 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:37Z","lastTransitionTime":"2026-01-28T18:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.167720 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.184888 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.202825 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.218439 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 
18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.233331 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.244730 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.255795 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.261656 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.261691 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.261703 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.261721 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.261734 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:37Z","lastTransitionTime":"2026-01-28T18:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.268576 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.282610 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.293870 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.317651 4767 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-di
r\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\"
:\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.332861 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.349601 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:37Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.364635 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.364704 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.364716 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.364736 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.364750 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:37Z","lastTransitionTime":"2026-01-28T18:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.466863 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.466913 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.466931 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.466960 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.466980 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:37Z","lastTransitionTime":"2026-01-28T18:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.569021 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.569061 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.569072 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.569089 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.569101 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:37Z","lastTransitionTime":"2026-01-28T18:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.672520 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.672597 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.672611 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.672628 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.672642 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:37Z","lastTransitionTime":"2026-01-28T18:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.775335 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 19:24:55.949372641 +0000 UTC
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.775952 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.775991 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.776006 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.776027 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.776042 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:37Z","lastTransitionTime":"2026-01-28T18:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.795283 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4"
Jan 28 18:30:37 crc kubenswrapper[4767]: E0128 18:30:37.795439 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.878967 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.879002 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.879014 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.879029 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.879039 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:37Z","lastTransitionTime":"2026-01-28T18:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.981977 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.982052 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.982077 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.982111 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:37 crc kubenswrapper[4767]: I0128 18:30:37.982135 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:37Z","lastTransitionTime":"2026-01-28T18:30:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.078336 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovnkube-controller/1.log"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.083919 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.083949 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.083958 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.083972 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.083981 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.187691 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.187761 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.187784 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.187813 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.187830 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.245127 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.245181 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.245189 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.245219 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.245233 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:38 crc kubenswrapper[4767]: E0128 18:30:38.258986 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:38Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.264433 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.264498 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.264518 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.264545 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.264566 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:38 crc kubenswrapper[4767]: E0128 18:30:38.285522 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:38Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.292096 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.292153 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.292166 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.292189 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.292220 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:38 crc kubenswrapper[4767]: E0128 18:30:38.308545 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:38Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.312942 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.313086 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.313148 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.313251 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.313339 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:38 crc kubenswrapper[4767]: E0128 18:30:38.330100 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:38Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.334928 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.334999 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.335019 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.335044 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.335064 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:38 crc kubenswrapper[4767]: E0128 18:30:38.352104 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:38Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:38Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:38 crc kubenswrapper[4767]: E0128 18:30:38.352373 4767 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.354661 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.354748 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.354772 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.354801 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.354823 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.457639 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.457693 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.457705 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.457727 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.457740 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.560800 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.560849 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.560862 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.560883 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.560896 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.663911 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.664070 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.664095 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.664114 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.664125 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.767282 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.767372 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.767393 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.767425 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.767447 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.776396 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 21:10:45.033336803 +0000 UTC Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.795175 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.795305 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.795485 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:38 crc kubenswrapper[4767]: E0128 18:30:38.795668 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:38 crc kubenswrapper[4767]: E0128 18:30:38.795738 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.795909 4767 scope.go:117] "RemoveContainer" containerID="abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce" Jan 28 18:30:38 crc kubenswrapper[4767]: E0128 18:30:38.795951 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.869458 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.869489 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.869499 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.869512 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.869521 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.971542 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.971570 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.971579 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.971592 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:38 crc kubenswrapper[4767]: I0128 18:30:38.971602 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:38Z","lastTransitionTime":"2026-01-28T18:30:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.073683 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.073717 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.073728 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.073742 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.073758 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:39Z","lastTransitionTime":"2026-01-28T18:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.176481 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.176543 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.176564 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.176591 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.176612 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:39Z","lastTransitionTime":"2026-01-28T18:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.279720 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.279769 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.279779 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.279796 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.279807 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:39Z","lastTransitionTime":"2026-01-28T18:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.382609 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.382641 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.382652 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.382665 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.382673 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:39Z","lastTransitionTime":"2026-01-28T18:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.484746 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.484777 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.484787 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.484802 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.484816 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:39Z","lastTransitionTime":"2026-01-28T18:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.587241 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.587278 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.587286 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.587299 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.587307 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:39Z","lastTransitionTime":"2026-01-28T18:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.689616 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.689665 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.689682 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.689703 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.689716 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:39Z","lastTransitionTime":"2026-01-28T18:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.776753 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 05:46:47.245376191 +0000 UTC Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.791665 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.791714 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.791723 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.791739 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.791767 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:39Z","lastTransitionTime":"2026-01-28T18:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.794520 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:39 crc kubenswrapper[4767]: E0128 18:30:39.794652 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.894437 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.894473 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.894482 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.894496 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.894505 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:39Z","lastTransitionTime":"2026-01-28T18:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.997454 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.997504 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.997515 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.997533 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:39 crc kubenswrapper[4767]: I0128 18:30:39.997547 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:39Z","lastTransitionTime":"2026-01-28T18:30:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.092792 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.094887 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.095026 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac"} Jan 28 18:30:40 crc kubenswrapper[4767]: E0128 18:30:40.095166 4767 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 18:30:40 crc kubenswrapper[4767]: E0128 18:30:40.095346 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs podName:0dc01d59-d401-4c7c-9eec-0a67aa5261fc nodeName:}" failed. No retries permitted until 2026-01-28 18:30:48.095307206 +0000 UTC m=+54.059490250 (durationBeforeRetry 8s). 
Jan 28 18:30:40 crc kubenswrapper[4767]: E0128 18:30:40.095346 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs podName:0dc01d59-d401-4c7c-9eec-0a67aa5261fc nodeName:}" failed. No retries permitted until 2026-01-28 18:30:48.095307206 +0000 UTC m=+54.059490250 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs") pod "network-metrics-daemon-qbch4" (UID: "0dc01d59-d401-4c7c-9eec-0a67aa5261fc") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.095700 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.099596 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.099643 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.099666 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.099690 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.099712 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:40Z","lastTransitionTime":"2026-01-28T18:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.134569 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.151861 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.171906 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.189883 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.202905 4767 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.202966 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.202978 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.202999 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.203013 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:40Z","lastTransitionTime":"2026-01-28T18:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.209632 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.227871 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.253508 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fc
a14d848f136ec09fbef08c94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4d1bfbcbbee1a360b611f283c93ce90ac09c269cfd0eab331396b01f04d99705\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:33Z\\\",\\\"message\\\":\\\"3158 6016 handler.go:190] Sending *v1.Pod event handler 6 for removal\\\\nI0128 18:30:31.783188 6016 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0128 18:30:31.783241 6016 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0128 18:30:31.783274 6016 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0128 18:30:31.783366 6016 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0128 18:30:31.783411 6016 factory.go:656] Stopping watch factory\\\\nI0128 18:30:31.783452 6016 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0128 18:30:31.783478 6016 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0128 18:30:31.783501 6016 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0128 18:30:31.783530 6016 handler.go:208] Removed *v1.Pod event handler 6\\\\nI0128 18:30:31.783574 6016 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0128 18:30:31.783270 6016 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0128 18:30:31.783636 6016 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0128 18:30:31.783679 6016 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0128 18:30:31.783646 6016 reflector.go:311] Stopping reflector *v1.Namespace (0s) from k8s.io/client-go/informers/f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:36Z\\\",\\\"message\\\":\\\"-multus/network-metrics-daemon-qbch4 in node crc\\\\nI0128 18:30:35.794315 6274 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0128 18:30:35.794321 6274 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-wcxcp after 0 failed attempt(s)\\\\nI0128 18:30:35.794335 6274 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-wcxcp\\\\nI0128 18:30:35.794342 6274 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0128 18:30:35.794344 6274 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-qbch4] creating logical port openshift-multus_network-metrics-daemon-qbch4 for pod on switch crc\\\\nI0128 18:30:35.794381 6274 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp\\\\nF0128 18:30:35.794402 6274 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, 
handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3
c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.267867 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\
\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.286024 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.298000 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.305732 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.305783 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.305799 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.305820 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.305843 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:40Z","lastTransitionTime":"2026-01-28T18:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.319588 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c
07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/e
ntrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.333132 4767 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.350018 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.363722 4767 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957
b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.383226 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.409088 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.409447 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.409508 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.409529 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.409558 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.409579 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:40Z","lastTransitionTime":"2026-01-28T18:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.430542 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:40Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.513430 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.513484 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.513500 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.513520 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.513537 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:40Z","lastTransitionTime":"2026-01-28T18:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.616149 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.616190 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.616218 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.616237 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.616252 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:40Z","lastTransitionTime":"2026-01-28T18:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.718988 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.719042 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.719053 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.719071 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.719080 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:40Z","lastTransitionTime":"2026-01-28T18:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.777372 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 02:42:15.010438568 +0000 UTC Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.794759 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.794824 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:40 crc kubenswrapper[4767]: E0128 18:30:40.794957 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.795006 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:40 crc kubenswrapper[4767]: E0128 18:30:40.795173 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:40 crc kubenswrapper[4767]: E0128 18:30:40.795171 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.821500 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.821547 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.821557 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.821575 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.821585 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:40Z","lastTransitionTime":"2026-01-28T18:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.923981 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.924251 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.924265 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.924282 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:40 crc kubenswrapper[4767]: I0128 18:30:40.924294 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:40Z","lastTransitionTime":"2026-01-28T18:30:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.026886 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.026946 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.026960 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.026974 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.026983 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:41Z","lastTransitionTime":"2026-01-28T18:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.129784 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.129827 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.129839 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.129861 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.129876 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:41Z","lastTransitionTime":"2026-01-28T18:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.234038 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.234114 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.234128 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.234147 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.234175 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:41Z","lastTransitionTime":"2026-01-28T18:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.337922 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.337986 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.338001 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.338020 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.338052 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:41Z","lastTransitionTime":"2026-01-28T18:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.405632 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.407912 4767 scope.go:117] "RemoveContainer" containerID="cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94" Jan 28 18:30:41 crc kubenswrapper[4767]: E0128 18:30:41.408303 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.421543 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.434258 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.440320 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.440353 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.440365 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.440380 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.440390 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:41Z","lastTransitionTime":"2026-01-28T18:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.453623 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:36Z\\\",\\\"message\\\":\\\"-multus/network-metrics-daemon-qbch4 in node crc\\\\nI0128 18:30:35.794315 6274 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0128 18:30:35.794321 6274 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-wcxcp after 0 failed attempt(s)\\\\nI0128 18:30:35.794335 6274 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-wcxcp\\\\nI0128 18:30:35.794342 6274 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0128 18:30:35.794344 6274 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-qbch4] creating logical port openshift-multus_network-metrics-daemon-qbch4 for pod on switch crc\\\\nI0128 18:30:35.794381 6274 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp\\\\nF0128 18:30:35.794402 6274 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.465266 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.477994 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":
[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] 
MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.488437 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.503060 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.516673 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 
18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.527466 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.537317 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.542953 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.542998 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.543007 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.543020 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.543047 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:41Z","lastTransitionTime":"2026-01-28T18:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.550815 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.562652 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.575414 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.588042 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io
/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.621781 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernet
es/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[
{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.638489 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.646038 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.646114 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.646131 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.646156 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.646189 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:41Z","lastTransitionTime":"2026-01-28T18:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.656904 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:41Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.749010 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.749058 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.749068 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.749084 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.749094 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:41Z","lastTransitionTime":"2026-01-28T18:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.778560 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 23:14:05.454043781 +0000 UTC Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.794992 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:41 crc kubenswrapper[4767]: E0128 18:30:41.795176 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.852861 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.852908 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.852921 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.852941 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.852959 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:41Z","lastTransitionTime":"2026-01-28T18:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.955518 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.955558 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.955566 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.955582 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:41 crc kubenswrapper[4767]: I0128 18:30:41.955593 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:41Z","lastTransitionTime":"2026-01-28T18:30:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.058447 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.058511 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.058528 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.058553 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.058576 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:42Z","lastTransitionTime":"2026-01-28T18:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.161566 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.161642 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.161660 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.161685 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.161701 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:42Z","lastTransitionTime":"2026-01-28T18:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.264495 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.264536 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.264548 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.264564 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
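The NodeNotReady events and "Node became not ready" conditions above repeat on every status-sync attempt (the kubelet retries quickly while the node is NotReady) because the container runtime keeps reporting NetworkReady=false until a CNI configuration shows up in /etc/kubernetes/cni/net.d/. A minimal Go sketch of that presence check — a hypothetical helper approximating what the CRI side does via libcni, not the kubelet's own code:

```go
// Sketch: approximate the CNI-config presence check behind the kubelet's
// NetworkReady status. Hypothetical helper; the directory is the one
// named in the log lines above.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func cniConfigPresent(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err // a missing dir also counts as "network not ready"
	}
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json": // extensions libcni accepts
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := cniConfigPresent("/etc/kubernetes/cni/net.d")
	fmt.Println(ok, err) // false until the network provider writes a config
}
```

The check flips to true, and the log spam stops, as soon as the network provider (here OVN-Kubernetes) drops its config file into that directory.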
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.264573 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:42Z","lastTransitionTime":"2026-01-28T18:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.367523 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.367585 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.367627 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.367658 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.367679 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:42Z","lastTransitionTime":"2026-01-28T18:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.471252 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.471304 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.471316 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.471332 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.471344 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:42Z","lastTransitionTime":"2026-01-28T18:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.573683 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.573726 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.573744 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.573762 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
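Each "Node became not ready" line carries the exact Ready condition the kubelet is trying to write to the API server. The same payload rebuilt with the public API types — an illustrative sketch that assumes k8s.io/api and k8s.io/apimachinery in go.mod; the kubelet itself assembles this in pkg/kubelet/nodestatus (setters.go, as the log prefix shows):

```go
// Sketch: the Ready=False condition from the "Node became not ready"
// lines above, rebuilt with the public API types.
package main

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	now := metav1.Now()
	cond := corev1.NodeCondition{
		Type:               corev1.NodeReady,
		Status:             corev1.ConditionFalse,
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. " +
			"Has your network provider started?",
	}
	b, _ := json.Marshal(cond)
	fmt.Println(string(b)) // same shape as the condition={...} payload in the log
}
```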
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.573775 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:42Z","lastTransitionTime":"2026-01-28T18:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.676743 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.676796 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.676813 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.676844 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.676868 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:42Z","lastTransitionTime":"2026-01-28T18:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.778884 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 10:24:02.853735175 +0000 UTC
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.779316 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.779361 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.779374 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.779391 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.779405 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:42Z","lastTransitionTime":"2026-01-28T18:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.795496 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.795574 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
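The certificate_manager lines print a different rotation deadline on each pass because client-go jitters the deadline inside the certificate's validity window; once the chosen deadline lies in the past (as here, with the node clock at 2026-01-28), rotation is attempted immediately and the deadline is re-rolled on the next tick. A sketch of that computation — the jitter band and the one-year lifetime below are assumptions for illustration; the real constants live in k8s.io/client-go/util/certificate:

```go
// Sketch: jittered rotation deadline, in the style of client-go's
// certificate manager. Constants are assumptions, not the exact ones.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	frac := 0.7 + 0.2*rand.Float64() // assumed band: ~70-90% of the lifetime
	return notBefore.Add(time.Duration(float64(total) * frac))
}

func main() {
	notAfter, _ := time.Parse(time.RFC3339, "2026-02-24T05:53:03Z") // from the log
	notBefore := notAfter.AddDate(-1, 0, 0)                         // assumed 1y lifetime
	for i := 0; i < 3; i++ {
		fmt.Println(rotationDeadline(notBefore, notAfter)) // differs per call, as in the log
	}
}
```

Until the clock skew is resolved, every recomputed deadline lands in the past, which is why the line recurs roughly once a second in this excerpt.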
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.795639 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 18:30:42 crc kubenswrapper[4767]: E0128 18:30:42.795773 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 18:30:42 crc kubenswrapper[4767]: E0128 18:30:42.796048 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 18:30:42 crc kubenswrapper[4767]: E0128 18:30:42.796197 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.882308 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.882394 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.882421 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.882451 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.882472 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:42Z","lastTransitionTime":"2026-01-28T18:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.985884 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.985925 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.985937 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.985953 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:42 crc kubenswrapper[4767]: I0128 18:30:42.985965 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:42Z","lastTransitionTime":"2026-01-28T18:30:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.088228 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.088301 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.088318 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.088738 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.088799 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:43Z","lastTransitionTime":"2026-01-28T18:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.194680 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.194733 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.194748 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.194767 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.194778 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:43Z","lastTransitionTime":"2026-01-28T18:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.297238 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.297290 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.297303 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.297323 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.297340 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:43Z","lastTransitionTime":"2026-01-28T18:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.399443 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.399477 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.399485 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.399499 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.399507 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:43Z","lastTransitionTime":"2026-01-28T18:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.502394 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.502449 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.502466 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.502489 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.502508 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:43Z","lastTransitionTime":"2026-01-28T18:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.605916 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.606004 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.606036 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.606066 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.606090 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:43Z","lastTransitionTime":"2026-01-28T18:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.709867 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.709925 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.709942 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.709968 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.709988 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:43Z","lastTransitionTime":"2026-01-28T18:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.779404 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 08:53:38.213373526 +0000 UTC Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.794860 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:43 crc kubenswrapper[4767]: E0128 18:30:43.795064 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.814023 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.814067 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.814080 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.814099 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.814112 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:43Z","lastTransitionTime":"2026-01-28T18:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.917437 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.917515 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.917536 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.917568 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:43 crc kubenswrapper[4767]: I0128 18:30:43.917591 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:43Z","lastTransitionTime":"2026-01-28T18:30:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.019898 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.019988 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.020000 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.020015 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.020026 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:44Z","lastTransitionTime":"2026-01-28T18:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.122075 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.122140 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.122153 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.122171 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.122182 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:44Z","lastTransitionTime":"2026-01-28T18:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.225045 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.225093 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.225113 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.225141 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.225157 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:44Z","lastTransitionTime":"2026-01-28T18:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.328469 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.328674 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.328692 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.328716 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.328735 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:44Z","lastTransitionTime":"2026-01-28T18:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.431012 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.431060 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.431071 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.431094 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.431107 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:44Z","lastTransitionTime":"2026-01-28T18:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.533456 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.533494 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.533506 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.533521 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.533531 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:44Z","lastTransitionTime":"2026-01-28T18:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.636183 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.636259 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.636273 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.636293 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.636308 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:44Z","lastTransitionTime":"2026-01-28T18:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.739173 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.739259 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.739284 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.739307 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.739322 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:44Z","lastTransitionTime":"2026-01-28T18:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.780229 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 18:18:12.760898881 +0000 UTC Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.794537 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.794683 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:44 crc kubenswrapper[4767]: E0128 18:30:44.794831 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.794871 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:30:44 crc kubenswrapper[4767]: E0128 18:30:44.795031 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 18:30:44 crc kubenswrapper[4767]: E0128 18:30:44.795169 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.806646 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.820269 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.832507 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.842031 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.842082 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.842095 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.842114 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.842126 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:44Z","lastTransitionTime":"2026-01-28T18:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.845432 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.856298 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.867520 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.877755 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.890892 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.902757 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.920698 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.931961 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.944687 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.944717 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.944725 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.944738 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.944747 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:44Z","lastTransitionTime":"2026-01-28T18:30:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.959062 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.971681 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.982072 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:44 crc kubenswrapper[4767]: I0128 18:30:44.992684 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status:
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:44Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.005184 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:45Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.022194 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fc
a14d848f136ec09fbef08c94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:36Z\\\",\\\"message\\\":\\\"-multus/network-metrics-daemon-qbch4 in node crc\\\\nI0128 18:30:35.794315 6274 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0128 18:30:35.794321 6274 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-wcxcp after 0 failed attempt(s)\\\\nI0128 18:30:35.794335 6274 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-wcxcp\\\\nI0128 18:30:35.794342 6274 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0128 18:30:35.794344 6274 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-qbch4] creating logical port openshift-multus_network-metrics-daemon-qbch4 for pod on switch crc\\\\nI0128 18:30:35.794381 6274 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp\\\\nF0128 18:30:35.794402 6274 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:45Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.047483 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.047525 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.047538 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.047557 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.047567 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:45Z","lastTransitionTime":"2026-01-28T18:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.149414 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.149454 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.149466 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.149481 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.149492 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:45Z","lastTransitionTime":"2026-01-28T18:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.252256 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.252299 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.252308 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.252323 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.252334 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:45Z","lastTransitionTime":"2026-01-28T18:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.354703 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.354745 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.354754 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.354772 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.354794 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:45Z","lastTransitionTime":"2026-01-28T18:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.457009 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.457052 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.457062 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.457077 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.457087 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:45Z","lastTransitionTime":"2026-01-28T18:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.559225 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.559276 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.559288 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.559308 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.559320 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:45Z","lastTransitionTime":"2026-01-28T18:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.661935 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.661982 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.661994 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.662015 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.662026 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:45Z","lastTransitionTime":"2026-01-28T18:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.764850 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.764889 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.764901 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.764918 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.764931 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:45Z","lastTransitionTime":"2026-01-28T18:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.780446 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 23:58:11.042880516 +0000 UTC Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.794898 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:45 crc kubenswrapper[4767]: E0128 18:30:45.795087 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.866628 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.866685 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.866705 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.866737 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.866759 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:45Z","lastTransitionTime":"2026-01-28T18:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.969410 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.969474 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.969491 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.969520 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:45 crc kubenswrapper[4767]: I0128 18:30:45.969538 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:45Z","lastTransitionTime":"2026-01-28T18:30:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.073894 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.073946 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.073962 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.073983 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.073996 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:46Z","lastTransitionTime":"2026-01-28T18:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.175978 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.176018 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.176029 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.176046 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.176057 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:46Z","lastTransitionTime":"2026-01-28T18:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.278502 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.278560 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.278574 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.278592 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.278605 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:46Z","lastTransitionTime":"2026-01-28T18:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.381274 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.381311 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.381327 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.381353 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.381367 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:46Z","lastTransitionTime":"2026-01-28T18:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.484141 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.484178 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.484188 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.484231 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.484243 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:46Z","lastTransitionTime":"2026-01-28T18:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.591170 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.591230 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.591241 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.591257 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.591268 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:46Z","lastTransitionTime":"2026-01-28T18:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.693778 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.693808 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.693817 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.693830 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.693838 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:46Z","lastTransitionTime":"2026-01-28T18:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.781476 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 20:10:23.877628932 +0000 UTC Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.795089 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:46 crc kubenswrapper[4767]: E0128 18:30:46.795192 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.795466 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:46 crc kubenswrapper[4767]: E0128 18:30:46.795588 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.795612 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:46 crc kubenswrapper[4767]: E0128 18:30:46.795691 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.796973 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.797011 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.797022 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.797045 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.797057 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:46Z","lastTransitionTime":"2026-01-28T18:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.899599 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.899668 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.899684 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.899708 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:46 crc kubenswrapper[4767]: I0128 18:30:46.899722 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:46Z","lastTransitionTime":"2026-01-28T18:30:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.002224 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.002260 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.002270 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.002286 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.002296 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:47Z","lastTransitionTime":"2026-01-28T18:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.104931 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.104979 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.104995 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.105015 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.105035 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:47Z","lastTransitionTime":"2026-01-28T18:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.207759 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.207833 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.207851 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.207881 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.207903 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:47Z","lastTransitionTime":"2026-01-28T18:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.311709 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.311797 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.311822 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.311856 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.311887 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:47Z","lastTransitionTime":"2026-01-28T18:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.415111 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.415180 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.415197 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.415251 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.415272 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:47Z","lastTransitionTime":"2026-01-28T18:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.519051 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.519091 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.519103 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.519121 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.519135 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:47Z","lastTransitionTime":"2026-01-28T18:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.621190 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.621269 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.621288 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.621311 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.621330 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:47Z","lastTransitionTime":"2026-01-28T18:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.726242 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.726285 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.726324 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.726344 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.726355 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:47Z","lastTransitionTime":"2026-01-28T18:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.782464 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 09:51:34.394652301 +0000 UTC
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.795050 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4"
Jan 28 18:30:47 crc kubenswrapper[4767]: E0128 18:30:47.795347 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.830456 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.830539 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.830566 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.830601 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.830629 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:47Z","lastTransitionTime":"2026-01-28T18:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.934473 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.934544 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.934561 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.934587 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:47 crc kubenswrapper[4767]: I0128 18:30:47.934606 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:47Z","lastTransitionTime":"2026-01-28T18:30:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.038491 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.038626 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.038649 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.038679 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.038697 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.054766 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.068684 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.075111 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.103471 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.126701 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.141558 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.141598 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.141609 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.141624 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.141633 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.151775 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.164880 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.181626 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.186189 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4"
Jan 28 18:30:48 crc kubenswrapper[4767]: E0128 18:30:48.186354 4767 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 18:30:48 crc kubenswrapper[4767]: E0128 18:30:48.186404 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs podName:0dc01d59-d401-4c7c-9eec-0a67aa5261fc nodeName:}" failed. No retries permitted until 2026-01-28 18:31:04.186391059 +0000 UTC m=+70.150573933 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs") pod "network-metrics-daemon-qbch4" (UID: "0dc01d59-d401-4c7c-9eec-0a67aa5261fc") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.199434 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.214692 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.228151 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.243809 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.243895 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.243915 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.243937 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.243952 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.258968 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.280950 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.297650 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.314449 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-da
emon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.329541 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\
\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.347065 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.347096 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.347104 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.347120 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.347131 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.349445 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:36Z\\\",\\\"message\\\":\\\"-multus/network-metrics-daemon-qbch4 in node crc\\\\nI0128 18:30:35.794315 6274 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0128 18:30:35.794321 6274 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-wcxcp after 0 failed attempt(s)\\\\nI0128 18:30:35.794335 6274 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-wcxcp\\\\nI0128 18:30:35.794342 6274 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0128 18:30:35.794344 6274 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-qbch4] creating logical port openshift-multus_network-metrics-daemon-qbch4 for pod on switch crc\\\\nI0128 18:30:35.794381 6274 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp\\\\nF0128 18:30:35.794402 6274 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.359938 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.371728 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.450323 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.450381 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.450405 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.450426 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.450440 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.469194 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.469244 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.469254 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.469269 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.469278 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:48 crc kubenswrapper[4767]: E0128 18:30:48.485688 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.491022 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.491106 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.491125 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.491156 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.491177 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:48 crc kubenswrapper[4767]: E0128 18:30:48.511353 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.515809 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.515890 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.515917 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.515955 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.515993 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:48 crc kubenswrapper[4767]: E0128 18:30:48.534532 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.537423 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.537460 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.537473 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.537494 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.537511 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:48 crc kubenswrapper[4767]: E0128 18:30:48.550370 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.554029 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.554093 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.554109 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.554130 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.554146 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:48 crc kubenswrapper[4767]: E0128 18:30:48.568523 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:48Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:48 crc kubenswrapper[4767]: E0128 18:30:48.568648 4767 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.570114 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.570140 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.570149 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.570164 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.570174 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.673359 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.673413 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.673429 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.673450 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.673465 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.776713 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.776770 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.776784 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.776804 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.776816 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.782913 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 05:03:21.289584051 +0000 UTC Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.795511 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:48 crc kubenswrapper[4767]: E0128 18:30:48.795670 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.795510 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.795511 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:48 crc kubenswrapper[4767]: E0128 18:30:48.795891 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:48 crc kubenswrapper[4767]: E0128 18:30:48.796070 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.880032 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.880097 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.880115 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.880141 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.880158 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.983049 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.983122 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.983139 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.983162 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:48 crc kubenswrapper[4767]: I0128 18:30:48.983182 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:48Z","lastTransitionTime":"2026-01-28T18:30:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.085101 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.085145 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.085154 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.085169 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.085178 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:49Z","lastTransitionTime":"2026-01-28T18:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.188300 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.188438 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.188478 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.188514 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.188541 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:49Z","lastTransitionTime":"2026-01-28T18:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.291246 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.291296 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.291310 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.291331 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.291345 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:49Z","lastTransitionTime":"2026-01-28T18:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.394067 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.394108 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.394121 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.394139 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.394153 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:49Z","lastTransitionTime":"2026-01-28T18:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.496164 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.496200 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.496237 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.496254 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.496266 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:49Z","lastTransitionTime":"2026-01-28T18:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.599381 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.599442 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.599462 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.599483 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.599502 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:49Z","lastTransitionTime":"2026-01-28T18:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.701739 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.701785 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.701802 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.701826 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.701842 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:49Z","lastTransitionTime":"2026-01-28T18:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.783692 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 19:16:00.709569448 +0000 UTC Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.795164 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:49 crc kubenswrapper[4767]: E0128 18:30:49.795294 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.804458 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.804510 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.804525 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.804548 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.804564 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:49Z","lastTransitionTime":"2026-01-28T18:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.907817 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.908105 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.908196 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.908316 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:49 crc kubenswrapper[4767]: I0128 18:30:49.908396 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:49Z","lastTransitionTime":"2026-01-28T18:30:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.011550 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.012110 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.012344 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.012880 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.013131 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:50Z","lastTransitionTime":"2026-01-28T18:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.117344 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.117428 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.117449 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.117479 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.117502 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:50Z","lastTransitionTime":"2026-01-28T18:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.221120 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.221263 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.221288 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.221318 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.221338 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:50Z","lastTransitionTime":"2026-01-28T18:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.324963 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.325067 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.325092 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.325130 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.325154 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:50Z","lastTransitionTime":"2026-01-28T18:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.428472 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.428523 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.428532 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.428549 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.428558 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:50Z","lastTransitionTime":"2026-01-28T18:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.531351 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.531456 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.531477 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.531510 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.531531 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:50Z","lastTransitionTime":"2026-01-28T18:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.634626 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.634687 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.634702 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.634725 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.634738 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:50Z","lastTransitionTime":"2026-01-28T18:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.714511 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.714822 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:22.714771713 +0000 UTC m=+88.678954627 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.714902 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.715003 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.715144 4767 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.715283 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:31:22.715254748 +0000 UTC m=+88.679437812 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.715293 4767 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.715467 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:31:22.715444724 +0000 UTC m=+88.679627768 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.738311 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.738368 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.738388 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.738416 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.738434 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:50Z","lastTransitionTime":"2026-01-28T18:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.784331 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 18:26:25.848905274 +0000 UTC Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.794656 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.794805 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.794805 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.795047 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.795102 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.795167 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.816670 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.816719 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.816848 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.816869 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.816882 4767 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.816945 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 18:31:22.816926463 +0000 UTC m=+88.781109337 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.817053 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.817134 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.817169 4767 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:50 crc kubenswrapper[4767]: E0128 18:30:50.817337 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 18:31:22.817302625 +0000 UTC m=+88.781485539 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.841277 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.841335 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.841348 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.841369 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.841386 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:50Z","lastTransitionTime":"2026-01-28T18:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.943789 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.943823 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.943831 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.943847 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:50 crc kubenswrapper[4767]: I0128 18:30:50.943858 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:50Z","lastTransitionTime":"2026-01-28T18:30:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.046344 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.046430 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.046447 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.046467 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.046484 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:51Z","lastTransitionTime":"2026-01-28T18:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.149502 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.149563 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.149580 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.149605 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.149623 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:51Z","lastTransitionTime":"2026-01-28T18:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.253501 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.253595 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.253620 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.253654 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.253680 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:51Z","lastTransitionTime":"2026-01-28T18:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.357515 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.357576 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.357594 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.357622 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.357640 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:51Z","lastTransitionTime":"2026-01-28T18:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.461336 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.461427 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.461457 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.461505 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.461530 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:51Z","lastTransitionTime":"2026-01-28T18:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.565258 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.565329 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.565352 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.565377 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.565396 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:51Z","lastTransitionTime":"2026-01-28T18:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.668628 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.668716 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.668741 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.668778 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.668809 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:51Z","lastTransitionTime":"2026-01-28T18:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.771658 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.771714 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.771730 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.771753 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.771772 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:51Z","lastTransitionTime":"2026-01-28T18:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.785135 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 13:28:05.118042822 +0000 UTC Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.795584 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:51 crc kubenswrapper[4767]: E0128 18:30:51.795885 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.827097 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.854523 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:51Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.875159 4767 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.875288 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.875309 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.875341 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.875363 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:51Z","lastTransitionTime":"2026-01-28T18:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.879331 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:51Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.903456 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:51Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.923151 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7b0efe8-6b8d-4aac-af64-ac8af3a8ca6d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c6d7734d6d813eb00e55c37f4be21149f90f52e3c3b764b1e7f140c02d00235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f721ff41c28a92f454b60651292d4f3f82c47e5d39e18f4917c0f6451bc88e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},
\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7949df7b03e7956207dfc8079e457f2455f3e3c0bffd84ac37df2df7962e095\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:51Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.956635 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:51Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.978582 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.978627 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.978643 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.978669 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.978716 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:51Z","lastTransitionTime":"2026-01-28T18:30:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:51 crc kubenswrapper[4767]: I0128 18:30:51.988526 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:36Z\\\",\\\"message\\\":\\\"-multus/network-metrics-daemon-qbch4 in node crc\\\\nI0128 18:30:35.794315 6274 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0128 18:30:35.794321 6274 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-wcxcp after 0 failed attempt(s)\\\\nI0128 18:30:35.794335 6274 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-wcxcp\\\\nI0128 18:30:35.794342 6274 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0128 18:30:35.794344 6274 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-qbch4] creating logical port openshift-multus_network-metrics-daemon-qbch4 for pod on switch crc\\\\nI0128 18:30:35.794381 6274 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp\\\\nF0128 18:30:35.794402 6274 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:51Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.004291 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:52Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.021579 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:52Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.058915 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:52Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.080021 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\
"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\
\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:52Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.082834 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.082898 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.082926 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.082961 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.082987 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:52Z","lastTransitionTime":"2026-01-28T18:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.102552 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:52Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.126105 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:52Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.144018 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:52Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.160728 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:52Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.179312 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:52Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.186000 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.186044 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.186061 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.186089 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.186107 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:52Z","lastTransitionTime":"2026-01-28T18:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.199611 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:52Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.218413 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:52Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.233223 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:52Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.288808 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.288855 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.288867 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.288887 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.288903 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:52Z","lastTransitionTime":"2026-01-28T18:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.391451 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.391491 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.391500 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.391516 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.391527 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:52Z","lastTransitionTime":"2026-01-28T18:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.495093 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.495186 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.495250 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.495283 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.495324 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:52Z","lastTransitionTime":"2026-01-28T18:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.597909 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.597979 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.597992 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.598014 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.598028 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:52Z","lastTransitionTime":"2026-01-28T18:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.701494 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.701550 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.701563 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.701591 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.701605 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:52Z","lastTransitionTime":"2026-01-28T18:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.786128 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 02:18:22.266300209 +0000 UTC
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.795646 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.795722 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 18:30:52 crc kubenswrapper[4767]: E0128 18:30:52.796017 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.796063 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:30:52 crc kubenswrapper[4767]: E0128 18:30:52.796368 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 18:30:52 crc kubenswrapper[4767]: E0128 18:30:52.796469 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.797424 4767 scope.go:117] "RemoveContainer" containerID="cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.804319 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.804405 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.804430 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.804466 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.804492 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:52Z","lastTransitionTime":"2026-01-28T18:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.907921 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.908509 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.908524 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.908543 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:52 crc kubenswrapper[4767]: I0128 18:30:52.908556 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:52Z","lastTransitionTime":"2026-01-28T18:30:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.011176 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.011229 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.011265 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.011284 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.011296 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:53Z","lastTransitionTime":"2026-01-28T18:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.114713 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.114750 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.114762 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.114778 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.114792 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:53Z","lastTransitionTime":"2026-01-28T18:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.141898 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovnkube-controller/1.log" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.144393 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerStarted","Data":"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"} Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.145133 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.161716 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.173815 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.196013 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7b0efe8-6b8d-4aac-af64-ac8af3a8ca6d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c6d7734d6d813eb00e55c37f4be21149f90f52e3c3b764b1e7f140c02d00235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f721ff41c28a92f454b60651292d4f3f82c47e5d39e18f4917c0f6451bc88e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},
\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7949df7b03e7956207dfc8079e457f2455f3e3c0bffd84ac37df2df7962e095\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.217468 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.217868 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.217914 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.217926 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.217953 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.217967 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:53Z","lastTransitionTime":"2026-01-28T18:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.236096 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.247139 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.271593 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.321855 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.321913 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.321929 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.321955 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.321971 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:53Z","lastTransitionTime":"2026-01-28T18:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.333406 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.362166 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:36Z\\\",\\\"message\\\":\\\"-multus/network-metrics-daemon-qbch4 in node crc\\\\nI0128 18:30:35.794315 6274 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0128 18:30:35.794321 6274 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-wcxcp after 0 failed attempt(s)\\\\nI0128 18:30:35.794335 6274 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-wcxcp\\\\nI0128 18:30:35.794342 6274 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0128 18:30:35.794344 6274 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-qbch4] creating logical port openshift-multus_network-metrics-daemon-qbch4 for pod on switch crc\\\\nI0128 18:30:35.794381 6274 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp\\\\nF0128 18:30:35.794402 6274 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared 
informer because it has stopped already, failed to start node network controller: f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\
\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.375985 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 
18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.388675 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.401317 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.416030 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.425021 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.425086 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:53 crc 
kubenswrapper[4767]: I0128 18:30:53.425099 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.425159 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.425174 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:53Z","lastTransitionTime":"2026-01-28T18:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.436890 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/va
r/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.466808 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.491150 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.514435 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.527984 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.528041 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.528054 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.528071 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.528084 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:53Z","lastTransitionTime":"2026-01-28T18:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.530084 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:53Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.630699 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.630738 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.630748 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.630765 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.630776 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:53Z","lastTransitionTime":"2026-01-28T18:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.732754 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.732817 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.732829 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.732850 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.732861 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:53Z","lastTransitionTime":"2026-01-28T18:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.787425 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 13:05:37.139762136 +0000 UTC
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.794921 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4"
Jan 28 18:30:53 crc kubenswrapper[4767]: E0128 18:30:53.795177 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.835202 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.835268 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.835284 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.835305 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.835319 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:53Z","lastTransitionTime":"2026-01-28T18:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
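The certificate_manager.go:356 entry above reports both the serving certificate's expiration and a rotation deadline that falls well before it. Below is a minimal sketch of how such a deadline can be computed, assuming (as client-go's certificate manager roughly does) a jittered instant late in the certificate's validity window; the 0.7/0.2 fractions and the NotBefore value are illustrative assumptions, since neither appears in this log.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextRotationDeadline picks a random instant between 70% and 90% of the
// way through the certificate's NotBefore..NotAfter validity window, so a
// fleet of kubelets does not rotate all at once. The exact fractions are
// an assumption here, not read out of this log.
func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	// NotAfter matches the expiration logged above; NotBefore is not
	// logged, so this value is a hypothetical placeholder.
	notBefore := time.Date(2025, 8, 26, 5, 53, 3, 0, time.UTC) // assumed
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC)
	fmt.Println("rotation deadline:", nextRotationDeadline(notBefore, notAfter))
}

Whatever the exact fractions, the deadline always lands before NotAfter, which is why the log can show a rotation deadline (2025-12-02) months ahead of the certificate's expiration (2026-02-24).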
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.937801 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.937860 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.937872 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.937889 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:53 crc kubenswrapper[4767]: I0128 18:30:53.937901 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:53Z","lastTransitionTime":"2026-01-28T18:30:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.041894 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.041964 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.041983 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.042013 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.042035 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:54Z","lastTransitionTime":"2026-01-28T18:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.145751 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.145811 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.145829 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.145854 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.145872 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:54Z","lastTransitionTime":"2026-01-28T18:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
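The repeating NodeNotReady condition above quotes the runtime's message directly: NetworkReady stays false until a CNI configuration file appears under /etc/kubernetes/cni/net.d/. Below is a minimal sketch of that style of directory probe, assuming the conventional CNI extensions (.conf, .conflist, .json); the function name and error text are illustrative, not the kubelet's own.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// networkReady reports whether confDir contains at least one CNI network
// configuration file. An empty directory yields the kind of "no CNI
// configuration file in ..." error seen in the log entries above.
func networkReady(confDir string) (bool, error) {
	entries, err := os.ReadDir(confDir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		// Conventional CNI config extensions; assumed here.
		switch strings.ToLower(filepath.Ext(e.Name())) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, fmt.Errorf("no CNI configuration file in %s", confDir)
}

func main() {
	ready, err := networkReady("/etc/kubernetes/cni/net.d/")
	fmt.Println("NetworkReady:", ready, "err:", err)
}

This is consistent with what the log shows: once the network provider (here, ovnkube) writes its config into that directory, the condition flips and the node can report Ready again.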
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.151356 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovnkube-controller/2.log"
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.152013 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovnkube-controller/1.log"
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.155728 4767 generic.go:334] "Generic (PLEG): container finished" podID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerID="250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64" exitCode=1
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.155758 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"}
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.155789 4767 scope.go:117] "RemoveContainer" containerID="cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94"
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.157357 4767 scope.go:117] "RemoveContainer" containerID="250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"
Jan 28 18:30:54 crc kubenswrapper[4767]: E0128 18:30:54.157944 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"
Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.179549 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7b0efe8-6b8d-4aac-af64-ac8af3a8ca6d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c6d7734d6d813eb00e55c37f4be21149f90f52e3c3b764b1e7f140c02d00235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f721ff41c28a92f454b60651292d4f3f82c47e5d39e18f4917c0f6451bc88e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7949df7b03e7956207dfc8079e457f2455f3e3c0bffd84ac37df2df7962e095\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.218824 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be
8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.238172 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.249028 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.249092 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.249110 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.249138 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.249158 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:54Z","lastTransitionTime":"2026-01-28T18:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.254607 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.270489 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.284919 4767 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.308021 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.331511 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://33246
21ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:36Z\\\",\\\"message\\\":\\\"-multus/network-metrics-daemon-qbch4 in node crc\\\\nI0128 18:30:35.794315 6274 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0128 18:30:35.794321 6274 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-wcxcp after 0 failed attempt(s)\\\\nI0128 18:30:35.794335 6274 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-wcxcp\\\\nI0128 18:30:35.794342 6274 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0128 18:30:35.794344 6274 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-qbch4] creating logical port openshift-multus_network-metrics-daemon-qbch4 for pod on switch crc\\\\nI0128 18:30:35.794381 6274 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp\\\\nF0128 18:30:35.794402 6274 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: 
f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:54Z\\\",\\\"message\\\":\\\"IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI0128 18:30:53.757696 6490 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI0128 18:30:53.758504 6490 services_controller.go:444] Built service openshift-kube-apiserver/apiserver LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI0128 18:30:53.757576 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.757788 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758519 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.758525 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758536 6490 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-hjjlv in node crc\\\\nF0128 18:30:53.758505 6490 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initia\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\
":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.345308 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.351189 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.351250 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.351266 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.351290 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.351305 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:54Z","lastTransitionTime":"2026-01-28T18:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.372983 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.384281 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.403023 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.419315 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 
18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.438263 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.453355 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.453415 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.453436 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.453461 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.453522 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:54Z","lastTransitionTime":"2026-01-28T18:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.457881 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.474691 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.491598 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.508036 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.556383 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.556471 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.556495 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.556528 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.556552 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:54Z","lastTransitionTime":"2026-01-28T18:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.659163 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.659221 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.659233 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.659253 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.659267 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:54Z","lastTransitionTime":"2026-01-28T18:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.767802 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.767871 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.767889 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.767917 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.767935 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:54Z","lastTransitionTime":"2026-01-28T18:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.788179 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 04:09:17.553676435 +0000 UTC Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.794791 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.794805 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.794805 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:54 crc kubenswrapper[4767]: E0128 18:30:54.795229 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:54 crc kubenswrapper[4767]: E0128 18:30:54.795200 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:54 crc kubenswrapper[4767]: E0128 18:30:54.795473 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.816782 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7b0efe8-6b8d-4aac-af64-ac8af3a8ca6d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c6d7734d6d813eb00e55c37f4be21149f90f52e3c3b764b1e7f140c02d00235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f721ff41c28a92f454b60651292d4f3f82c47e5d39e18f4917c0f6451bc88e9\\\",\\\"image\\\":\\\"quay.io/ope
nshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7949df7b03e7956207dfc8079e457f2455f3e3c0bffd84ac37df2df7962e095\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.849233 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.870494 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.872294 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.872373 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.872399 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.872435 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.872463 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:54Z","lastTransitionTime":"2026-01-28T18:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.886413 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.900970 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.915007 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.930768 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.955444 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4
c927794af85cc8cdaa98cd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cce89426c15ca02787f82029aef8ba644fecd8fca14d848f136ec09fbef08c94\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:36Z\\\",\\\"message\\\":\\\"-multus/network-metrics-daemon-qbch4 in node crc\\\\nI0128 18:30:35.794315 6274 obj_retry.go:365] Adding new object: *v1.Pod openshift-kube-apiserver/kube-apiserver-crc\\\\nI0128 18:30:35.794321 6274 obj_retry.go:386] Retry successful for *v1.Pod openshift-dns/node-resolver-wcxcp after 0 failed attempt(s)\\\\nI0128 18:30:35.794335 6274 default_network_controller.go:776] Recording success event on pod openshift-dns/node-resolver-wcxcp\\\\nI0128 18:30:35.794342 6274 ovn.go:134] Ensuring zone local for Pod openshift-kube-apiserver/kube-apiserver-crc in node crc\\\\nI0128 18:30:35.794344 6274 base_network_controller_pods.go:477] [default/openshift-multus/network-metrics-daemon-qbch4] creating logical port openshift-multus_network-metrics-daemon-qbch4 for pod on switch crc\\\\nI0128 18:30:35.794381 6274 obj_retry.go:303] Retry object setup: *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp\\\\nF0128 18:30:35.794402 6274 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: f\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:34Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:54Z\\\",\\\"message\\\":\\\"IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI0128 18:30:53.757696 6490 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI0128 18:30:53.758504 6490 services_controller.go:444] Built service openshift-kube-apiserver/apiserver LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI0128 18:30:53.757576 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.757788 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758519 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.758525 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758536 6490 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-hjjlv in node crc\\\\nF0128 18:30:53.758505 6490 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: 
unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initia\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c45
0bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.966291 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\"
,\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.975324 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.975398 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.975418 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.975447 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.975468 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:54Z","lastTransitionTime":"2026-01-28T18:30:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:54 crc kubenswrapper[4767]: I0128 18:30:54.988077 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:54Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.004057 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.022568 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.035496 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 
18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.046123 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.058419 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.069441 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.078537 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.078575 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.078586 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.078603 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.078613 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:55Z","lastTransitionTime":"2026-01-28T18:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.080912 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.093293 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.160337 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovnkube-controller/2.log" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.164008 4767 scope.go:117] "RemoveContainer" containerID="250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64" Jan 28 18:30:55 crc kubenswrapper[4767]: E0128 18:30:55.164199 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.175250 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.180385 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.180412 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.180421 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.180453 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.180465 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:55Z","lastTransitionTime":"2026-01-28T18:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.188199 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.208483 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mou
ntPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\
\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:54Z\\\",\\\"message\\\":\\\"IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI0128 18:30:53.757696 6490 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI0128 18:30:53.758504 6490 services_controller.go:444] Built service openshift-kube-apiserver/apiserver LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI0128 18:30:53.757576 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.757788 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758519 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.758525 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758536 6490 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-hjjlv in node crc\\\\nF0128 18:30:53.758505 6490 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not 
add Event Handler for anpInformer during admin network policy controller initia\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\
\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.218292 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.230989 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.240516 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.254284 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.266386 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 
18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.280258 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.283259 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.283308 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.283319 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.283333 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.283345 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:55Z","lastTransitionTime":"2026-01-28T18:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.292798 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.304083 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.314467 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.323544 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.333336 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7b0efe8-6b8d-4aac-af64-ac8af3a8ca6d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c6d7734d6d813eb00e55c37f4be21149f90f52e3c3b764b1e7f140c02d00235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f721ff41c28a92f454b60651292d4f3f82c47e5d39e18f4917c0f6451bc88e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7949df7b03e7956207dfc8079e457f2455f3e3c0bffd84ac37df2df7962e095\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.350296 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be
8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.360676 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.369864 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.379029 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-da
emon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:55Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.385463 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.385513 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.385524 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.385540 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.385552 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:55Z","lastTransitionTime":"2026-01-28T18:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.488496 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.488537 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.488559 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.488576 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.488590 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:55Z","lastTransitionTime":"2026-01-28T18:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.590591 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.590622 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.590630 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.590643 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.590651 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:55Z","lastTransitionTime":"2026-01-28T18:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.694383 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.694461 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.694484 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.694514 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.694528 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:55Z","lastTransitionTime":"2026-01-28T18:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.789043 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 14:37:25.635053252 +0000 UTC Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.795404 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:55 crc kubenswrapper[4767]: E0128 18:30:55.795721 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.797336 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.797380 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.797394 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.797410 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.797423 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:55Z","lastTransitionTime":"2026-01-28T18:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.899438 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.899480 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.899509 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.899526 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:55 crc kubenswrapper[4767]: I0128 18:30:55.899536 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:55Z","lastTransitionTime":"2026-01-28T18:30:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.002933 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.003014 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.003038 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.003070 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.003097 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:56Z","lastTransitionTime":"2026-01-28T18:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.107143 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.107264 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.107299 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.107331 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.107351 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:56Z","lastTransitionTime":"2026-01-28T18:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.211686 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.211761 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.211779 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.211808 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.211827 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:56Z","lastTransitionTime":"2026-01-28T18:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.316004 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.316077 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.316101 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.316127 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.316148 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:56Z","lastTransitionTime":"2026-01-28T18:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.418733 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.418780 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.418793 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.418810 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.418838 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:56Z","lastTransitionTime":"2026-01-28T18:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.521422 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.521469 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.521480 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.521498 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.521511 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:56Z","lastTransitionTime":"2026-01-28T18:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.624795 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.624859 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.624877 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.624901 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.624921 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:56Z","lastTransitionTime":"2026-01-28T18:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.728522 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.728598 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.728619 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.728646 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.728667 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:56Z","lastTransitionTime":"2026-01-28T18:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.789285 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 04:01:43.370274399 +0000 UTC Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.795408 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.795408 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.795600 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:56 crc kubenswrapper[4767]: E0128 18:30:56.795817 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:56 crc kubenswrapper[4767]: E0128 18:30:56.795983 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:56 crc kubenswrapper[4767]: E0128 18:30:56.796195 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.831284 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.831338 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.831352 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.831371 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.831387 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:56Z","lastTransitionTime":"2026-01-28T18:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.933723 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.933766 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.933776 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.933789 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:56 crc kubenswrapper[4767]: I0128 18:30:56.933798 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:56Z","lastTransitionTime":"2026-01-28T18:30:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.036328 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.036374 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.036382 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.036395 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.036408 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:57Z","lastTransitionTime":"2026-01-28T18:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.139543 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.139638 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.139657 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.139686 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.139704 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:57Z","lastTransitionTime":"2026-01-28T18:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.242733 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.242797 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.242811 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.242828 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.242838 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:57Z","lastTransitionTime":"2026-01-28T18:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.345351 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.345434 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.345448 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.345467 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.345482 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:57Z","lastTransitionTime":"2026-01-28T18:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.448521 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.448579 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.448602 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.448626 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.448642 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:57Z","lastTransitionTime":"2026-01-28T18:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.551952 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.552087 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.552111 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.552138 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.552158 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:57Z","lastTransitionTime":"2026-01-28T18:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.654789 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.654901 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.654926 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.654954 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.654973 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:57Z","lastTransitionTime":"2026-01-28T18:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.756958 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.757013 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.757025 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.757042 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.757057 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:57Z","lastTransitionTime":"2026-01-28T18:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.790301 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 07:42:38.096884738 +0000 UTC Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.794646 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:57 crc kubenswrapper[4767]: E0128 18:30:57.794771 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.859920 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.859976 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.859989 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.860024 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.860034 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:57Z","lastTransitionTime":"2026-01-28T18:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.963242 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.963287 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.963319 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.963333 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:57 crc kubenswrapper[4767]: I0128 18:30:57.963343 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:57Z","lastTransitionTime":"2026-01-28T18:30:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.065853 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.065945 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.065979 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.066011 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.066031 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.169360 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.169445 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.169461 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.169482 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.169495 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.272441 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.272529 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.272555 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.272592 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.272618 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.375060 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.375122 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.375137 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.375160 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.375175 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.478687 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.478737 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.478755 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.478779 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.478796 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.580634 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.580672 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.580682 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.580704 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.580715 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.683781 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.683865 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.683882 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.683901 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.683917 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.786127 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.786179 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.786189 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.786224 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.786237 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.791306 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 15:50:21.391754137 +0000 UTC Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.794691 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.794793 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:30:58 crc kubenswrapper[4767]: E0128 18:30:58.794838 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.794707 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:30:58 crc kubenswrapper[4767]: E0128 18:30:58.794957 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:30:58 crc kubenswrapper[4767]: E0128 18:30:58.795005 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.862938 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.863007 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.863039 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.863094 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.863111 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: E0128 18:30:58.878525 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:58Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.882336 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.882385 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.882402 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.882424 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.882440 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: E0128 18:30:58.897262 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:58Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.900809 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.900852 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.900867 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.900891 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.900906 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: E0128 18:30:58.915893 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:58Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.921741 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.921805 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.921823 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.921848 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.921864 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: E0128 18:30:58.937455 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:58Z is after 2025-08-24T17:21:41Z"
Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.941486 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.941547 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.941565 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.941591 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.941609 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:58 crc kubenswrapper[4767]: E0128 18:30:58.958329 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:58Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:30:58Z is after 2025-08-24T17:21:41Z" Jan 28 18:30:58 crc kubenswrapper[4767]: E0128 18:30:58.958661 4767 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.960260 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.960308 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.960325 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.960347 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:58 crc kubenswrapper[4767]: I0128 18:30:58.960364 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:58Z","lastTransitionTime":"2026-01-28T18:30:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.062525 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.062559 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.062567 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.062580 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.062590 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:59Z","lastTransitionTime":"2026-01-28T18:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.165443 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.165485 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.165494 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.165512 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.165523 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:59Z","lastTransitionTime":"2026-01-28T18:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.268138 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.268185 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.268195 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.268238 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.268252 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:59Z","lastTransitionTime":"2026-01-28T18:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.374069 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.374103 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.374112 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.374125 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.374134 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:59Z","lastTransitionTime":"2026-01-28T18:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.477895 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.477970 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.477988 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.478015 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.478033 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:59Z","lastTransitionTime":"2026-01-28T18:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.581481 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.581574 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.581594 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.581624 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.581643 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:59Z","lastTransitionTime":"2026-01-28T18:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.684521 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.684567 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.684576 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.684591 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.684602 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:59Z","lastTransitionTime":"2026-01-28T18:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.788514 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.788610 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.788637 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.788677 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.788700 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:59Z","lastTransitionTime":"2026-01-28T18:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.791832 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 22:12:33.23031641 +0000 UTC Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.795238 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:30:59 crc kubenswrapper[4767]: E0128 18:30:59.795437 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.891949 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.892007 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.892025 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.892049 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.892066 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:59Z","lastTransitionTime":"2026-01-28T18:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.997424 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.997487 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.997505 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.997534 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:30:59 crc kubenswrapper[4767]: I0128 18:30:59.997553 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:30:59Z","lastTransitionTime":"2026-01-28T18:30:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.101422 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.101499 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.101514 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.101547 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.101564 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:00Z","lastTransitionTime":"2026-01-28T18:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.205290 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.205364 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.205379 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.205399 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.205416 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:00Z","lastTransitionTime":"2026-01-28T18:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.308468 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.308515 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.308526 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.308539 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.308549 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:00Z","lastTransitionTime":"2026-01-28T18:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.411300 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.411378 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.411395 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.411418 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.411467 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:00Z","lastTransitionTime":"2026-01-28T18:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.515364 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.515410 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.515421 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.515442 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.515461 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:00Z","lastTransitionTime":"2026-01-28T18:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.618089 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.618185 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.618197 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.618241 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.618253 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:00Z","lastTransitionTime":"2026-01-28T18:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.720774 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.720823 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.720832 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.720850 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.720863 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:00Z","lastTransitionTime":"2026-01-28T18:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.792895 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-04 01:57:13.280145009 +0000 UTC Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.795238 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.795322 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:31:00 crc kubenswrapper[4767]: E0128 18:31:00.795463 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.795725 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:00 crc kubenswrapper[4767]: E0128 18:31:00.795825 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:31:00 crc kubenswrapper[4767]: E0128 18:31:00.796057 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.823532 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.823588 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.823600 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.823618 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.823630 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:00Z","lastTransitionTime":"2026-01-28T18:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.925956 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.926003 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.926018 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.926049 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:00 crc kubenswrapper[4767]: I0128 18:31:00.926067 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:00Z","lastTransitionTime":"2026-01-28T18:31:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.028448 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.028485 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.028493 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.028511 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.028520 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:01Z","lastTransitionTime":"2026-01-28T18:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.131546 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.131592 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.131603 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.131619 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.131630 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:01Z","lastTransitionTime":"2026-01-28T18:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.233611 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.233686 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.233699 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.233718 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.233731 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:01Z","lastTransitionTime":"2026-01-28T18:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.336366 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.336413 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.336425 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.336443 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.336457 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:01Z","lastTransitionTime":"2026-01-28T18:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.438687 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.438747 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.438769 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.438799 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.438821 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:01Z","lastTransitionTime":"2026-01-28T18:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.541056 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.541108 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.541118 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.541139 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.541150 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:01Z","lastTransitionTime":"2026-01-28T18:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.643111 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.643150 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.643158 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.643172 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.643182 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:01Z","lastTransitionTime":"2026-01-28T18:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.744822 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.744875 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.744886 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.744899 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.744908 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:01Z","lastTransitionTime":"2026-01-28T18:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.794002 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 11:14:48.245013379 +0000 UTC Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.795243 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:01 crc kubenswrapper[4767]: E0128 18:31:01.795360 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.846875 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.846917 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.846926 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.847125 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.847136 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:01Z","lastTransitionTime":"2026-01-28T18:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.949408 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.949454 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.949466 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.949483 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:01 crc kubenswrapper[4767]: I0128 18:31:01.949495 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:01Z","lastTransitionTime":"2026-01-28T18:31:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.051623 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.051670 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.051684 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.051704 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.051717 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:02Z","lastTransitionTime":"2026-01-28T18:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.153985 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.154024 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.154035 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.154051 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.154065 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:02Z","lastTransitionTime":"2026-01-28T18:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.256051 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.256100 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.256110 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.256128 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.256241 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:02Z","lastTransitionTime":"2026-01-28T18:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.358395 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.358436 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.358447 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.358463 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.358476 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:02Z","lastTransitionTime":"2026-01-28T18:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.460898 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.460940 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.460948 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.460962 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.460970 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:02Z","lastTransitionTime":"2026-01-28T18:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.563267 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.563302 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.563311 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.563326 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.563338 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:02Z","lastTransitionTime":"2026-01-28T18:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.666476 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.666531 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.666542 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.666564 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.666578 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:02Z","lastTransitionTime":"2026-01-28T18:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.769271 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.769319 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.769335 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.769357 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.769373 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:02Z","lastTransitionTime":"2026-01-28T18:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.795466 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:02 crc kubenswrapper[4767]: E0128 18:31:02.795630 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.795912 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:31:02 crc kubenswrapper[4767]: E0128 18:31:02.796010 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.796177 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 20:49:51.23522369 +0000 UTC Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.796262 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:02 crc kubenswrapper[4767]: E0128 18:31:02.796324 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.872284 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.872357 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.872384 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.872415 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.872441 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:02Z","lastTransitionTime":"2026-01-28T18:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.974734 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.974919 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.975014 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.975090 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:02 crc kubenswrapper[4767]: I0128 18:31:02.975164 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:02Z","lastTransitionTime":"2026-01-28T18:31:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.077577 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.077639 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.077657 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.077682 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.077702 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:03Z","lastTransitionTime":"2026-01-28T18:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.181156 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.181264 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.181320 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.181344 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.181359 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:03Z","lastTransitionTime":"2026-01-28T18:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.285284 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.285775 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.285798 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.285826 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.285847 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:03Z","lastTransitionTime":"2026-01-28T18:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.389249 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.389310 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.389325 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.389346 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.389363 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:03Z","lastTransitionTime":"2026-01-28T18:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.491754 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.491810 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.491820 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.491833 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.491842 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:03Z","lastTransitionTime":"2026-01-28T18:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.594691 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.594728 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.594738 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.594754 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.594765 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:03Z","lastTransitionTime":"2026-01-28T18:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.697715 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.697780 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.697789 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.697804 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.697813 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:03Z","lastTransitionTime":"2026-01-28T18:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.794845 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:03 crc kubenswrapper[4767]: E0128 18:31:03.794994 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.797158 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 03:50:55.010205588 +0000 UTC Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.800044 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.800105 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.800118 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.800141 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.800153 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:03Z","lastTransitionTime":"2026-01-28T18:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.901728 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.901771 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.901782 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.901797 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:03 crc kubenswrapper[4767]: I0128 18:31:03.901808 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:03Z","lastTransitionTime":"2026-01-28T18:31:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.003780 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.003829 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.003837 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.003852 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.003861 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:04Z","lastTransitionTime":"2026-01-28T18:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.106349 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.106400 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.106412 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.106429 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.106441 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:04Z","lastTransitionTime":"2026-01-28T18:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.208391 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.208451 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.208463 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.208481 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.208493 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:04Z","lastTransitionTime":"2026-01-28T18:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.267462 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:04 crc kubenswrapper[4767]: E0128 18:31:04.267639 4767 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 18:31:04 crc kubenswrapper[4767]: E0128 18:31:04.267708 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs podName:0dc01d59-d401-4c7c-9eec-0a67aa5261fc nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.267689671 +0000 UTC m=+102.231872555 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs") pod "network-metrics-daemon-qbch4" (UID: "0dc01d59-d401-4c7c-9eec-0a67aa5261fc") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.310953 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.311011 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.311027 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.311052 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.311070 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:04Z","lastTransitionTime":"2026-01-28T18:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.413054 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.413140 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.413153 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.413170 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.413180 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:04Z","lastTransitionTime":"2026-01-28T18:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.515236 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.515268 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.515276 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.515290 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.515299 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:04Z","lastTransitionTime":"2026-01-28T18:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.617353 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.617396 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.617408 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.617424 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.617434 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:04Z","lastTransitionTime":"2026-01-28T18:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.719487 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.719526 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.719539 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.719584 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.719593 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:04Z","lastTransitionTime":"2026-01-28T18:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.794814 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:04 crc kubenswrapper[4767]: E0128 18:31:04.794936 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.795010 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:31:04 crc kubenswrapper[4767]: E0128 18:31:04.795119 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.795188 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:04 crc kubenswrapper[4767]: E0128 18:31:04.795258 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.797350 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 07:36:49.290162209 +0000 UTC Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.809323 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mo
untPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z"
Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.822076 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.822112 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.822122 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.822140 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.822154 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:04Z","lastTransitionTime":"2026-01-28T18:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
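Note: the "Failed to update status for pod" entries here all bounce off the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743, whose serving certificate expired 2025-08-24T17:21:41Z. A small diagnostic sketch that dials the endpoint from the log and prints the leaf certificate's validity window (InsecureSkipVerify lets the handshake complete so the expired certificate can be inspected; do not use it for anything but inspection):

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Endpoint taken from the webhook error above.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()
	leaf := conn.ConnectionState().PeerCertificates[0]
	fmt.Println("subject:  ", leaf.Subject)
	fmt.Println("notBefore:", leaf.NotBefore)
	fmt.Println("notAfter: ", leaf.NotAfter)
	fmt.Println("expired:  ", time.Now().After(leaf.NotAfter))
}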
Has your network provider started?"} Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.826948 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:54Z\\\",\\\"message\\\":\\\"IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI0128 18:30:53.757696 6490 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI0128 18:30:53.758504 6490 services_controller.go:444] Built service openshift-kube-apiserver/apiserver LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI0128 18:30:53.757576 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.757788 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758519 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.758525 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758536 6490 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-hjjlv in node crc\\\\nF0128 18:30:53.758505 6490 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initia\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.837510 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.849346 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.859364 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.876696 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 
2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.889088 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.900815 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.913875 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.924075 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.924112 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.924123 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.924138 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.924149 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:04Z","lastTransitionTime":"2026-01-28T18:31:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.925354 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.936876 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.953075 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.962170 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.977919 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.989007 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:04 crc kubenswrapper[4767]: I0128 18:31:04.998999 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:04Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.009383 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:05Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.020810 4767 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7b0efe8-6b8d-4aac-af64-ac8af3a8ca6d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c6d7734d6d813eb00e55c37f4be21149f90f52e3c3b764b1e7f140c02d00235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f721ff41c28a92f454b60651292d4f3f82c47e5d39e18f4917c0f6451bc88e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7949df7b03e7956207dfc8079e457f2455f3e3c0bffd84ac37df2df7962e095\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":
[{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:05Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.026319 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.026360 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.026369 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.026384 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.026393 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:05Z","lastTransitionTime":"2026-01-28T18:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.128463 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.128495 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.128504 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.128518 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.128527 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:05Z","lastTransitionTime":"2026-01-28T18:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.230337 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.230380 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.230391 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.230408 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.230418 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:05Z","lastTransitionTime":"2026-01-28T18:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.332515 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.332561 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.332570 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.332587 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.332596 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:05Z","lastTransitionTime":"2026-01-28T18:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.434529 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.434576 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.434584 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.434598 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.434608 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:05Z","lastTransitionTime":"2026-01-28T18:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.536787 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.536846 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.536862 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.536884 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.536899 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:05Z","lastTransitionTime":"2026-01-28T18:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.639450 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.639485 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.639494 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.639507 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.639517 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:05Z","lastTransitionTime":"2026-01-28T18:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.742168 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.742234 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.742245 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.742262 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.742273 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:05Z","lastTransitionTime":"2026-01-28T18:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.794884 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:05 crc kubenswrapper[4767]: E0128 18:31:05.795028 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.797915 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 21:37:09.729091981 +0000 UTC Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.843901 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.843940 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.843950 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.843966 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.843978 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:05Z","lastTransitionTime":"2026-01-28T18:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.946457 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.946492 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.946501 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.946516 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:05 crc kubenswrapper[4767]: I0128 18:31:05.946526 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:05Z","lastTransitionTime":"2026-01-28T18:31:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.048673 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.048716 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.048726 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.048740 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.048750 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:06Z","lastTransitionTime":"2026-01-28T18:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.151259 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.151292 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.151301 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.151317 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.151328 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:06Z","lastTransitionTime":"2026-01-28T18:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.253286 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.253321 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.253330 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.253343 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.253353 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:06Z","lastTransitionTime":"2026-01-28T18:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.355736 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.355807 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.355817 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.355831 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.355841 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:06Z","lastTransitionTime":"2026-01-28T18:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.457738 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.457795 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.457807 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.457825 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.457838 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:06Z","lastTransitionTime":"2026-01-28T18:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.561326 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.561372 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.561380 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.561395 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.561404 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:06Z","lastTransitionTime":"2026-01-28T18:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.664195 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.664268 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.664279 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.664296 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.664309 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:06Z","lastTransitionTime":"2026-01-28T18:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.766926 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.766972 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.766983 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.767001 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.767015 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:06Z","lastTransitionTime":"2026-01-28T18:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.794928 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.794955 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:31:06 crc kubenswrapper[4767]: E0128 18:31:06.795095 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.795248 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:06 crc kubenswrapper[4767]: E0128 18:31:06.795308 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:31:06 crc kubenswrapper[4767]: E0128 18:31:06.795375 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.798437 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 02:32:57.647413007 +0000 UTC Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.869373 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.869418 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.869430 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.869448 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.869463 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:06Z","lastTransitionTime":"2026-01-28T18:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.971471 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.971538 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.971549 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.971565 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:06 crc kubenswrapper[4767]: I0128 18:31:06.971577 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:06Z","lastTransitionTime":"2026-01-28T18:31:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.073949 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.074010 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.074034 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.074065 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.074089 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:07Z","lastTransitionTime":"2026-01-28T18:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.176323 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.176368 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.176380 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.176396 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.176407 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:07Z","lastTransitionTime":"2026-01-28T18:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.278812 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.278870 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.278887 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.278910 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.278927 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:07Z","lastTransitionTime":"2026-01-28T18:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.381201 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.381300 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.381317 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.381343 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.381359 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:07Z","lastTransitionTime":"2026-01-28T18:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.484595 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.484634 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.484645 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.484662 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.484674 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:07Z","lastTransitionTime":"2026-01-28T18:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.586809 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.586865 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.586882 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.586905 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.586924 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:07Z","lastTransitionTime":"2026-01-28T18:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.689279 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.689340 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.689353 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.689374 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.689389 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:07Z","lastTransitionTime":"2026-01-28T18:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.792014 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.792063 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.792075 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.792092 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.792104 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:07Z","lastTransitionTime":"2026-01-28T18:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.795395 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:07 crc kubenswrapper[4767]: E0128 18:31:07.795775 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.795874 4767 scope.go:117] "RemoveContainer" containerID="250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64" Jan 28 18:31:07 crc kubenswrapper[4767]: E0128 18:31:07.796017 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\"" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.798616 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 09:41:06.609072349 +0000 UTC Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.894327 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.894387 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.894404 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.894429 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.894447 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:07Z","lastTransitionTime":"2026-01-28T18:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.996442 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.996481 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.996493 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.996509 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:07 crc kubenswrapper[4767]: I0128 18:31:07.996518 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:07Z","lastTransitionTime":"2026-01-28T18:31:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.099916 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.099966 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.099978 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.099999 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.100011 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:08Z","lastTransitionTime":"2026-01-28T18:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.201977 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.202028 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.202039 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.202057 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.202068 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:08Z","lastTransitionTime":"2026-01-28T18:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.303732 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.303762 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.303771 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.303785 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.303796 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:08Z","lastTransitionTime":"2026-01-28T18:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.406314 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.406372 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.406386 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.406411 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.406488 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:08Z","lastTransitionTime":"2026-01-28T18:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.509283 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.509319 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.509327 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.509342 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.509351 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:08Z","lastTransitionTime":"2026-01-28T18:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.612131 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.612180 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.612200 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.612251 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.612273 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:08Z","lastTransitionTime":"2026-01-28T18:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.715170 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.715221 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.715230 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.715245 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.715254 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:08Z","lastTransitionTime":"2026-01-28T18:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.795300 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.795430 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.795531 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:31:08 crc kubenswrapper[4767]: E0128 18:31:08.795529 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:31:08 crc kubenswrapper[4767]: E0128 18:31:08.795648 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:31:08 crc kubenswrapper[4767]: E0128 18:31:08.795783 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.798810 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 10:26:12.56475488 +0000 UTC Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.816873 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.816915 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.816930 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.816949 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.816963 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:08Z","lastTransitionTime":"2026-01-28T18:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.920069 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.920115 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.920127 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.920145 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:08 crc kubenswrapper[4767]: I0128 18:31:08.920158 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:08Z","lastTransitionTime":"2026-01-28T18:31:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.022670 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.022712 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.022724 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.022741 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.022754 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.071624 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.071655 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.071664 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.071675 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.071684 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:09 crc kubenswrapper[4767]: E0128 18:31:09.085157 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.088368 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.088402 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.088411 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.088428 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.088440 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:09 crc kubenswrapper[4767]: E0128 18:31:09.099447 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.103925 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.103959 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.103968 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.103983 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.103991 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:09 crc kubenswrapper[4767]: E0128 18:31:09.120792 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.124412 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.124464 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.124474 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.124488 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.124496 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:09 crc kubenswrapper[4767]: E0128 18:31:09.135777 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.139845 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.139874 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.139886 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.139902 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.139913 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:09 crc kubenswrapper[4767]: E0128 18:31:09.151484 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: E0128 18:31:09.151607 4767 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.152651 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.152885 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.152894 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.152909 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.152918 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.206732 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hjjlv_5a8e6ea7-4d55-4222-840b-c0383a9bc7da/kube-multus/0.log" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.206788 4767 generic.go:334] "Generic (PLEG): container finished" podID="5a8e6ea7-4d55-4222-840b-c0383a9bc7da" containerID="4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1" exitCode=1 Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.206814 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hjjlv" event={"ID":"5a8e6ea7-4d55-4222-840b-c0383a9bc7da","Type":"ContainerDied","Data":"4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1"} Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.207119 4767 scope.go:117] "RemoveContainer" containerID="4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.219788 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7b0efe8-6b8d-4aac-af64-ac8af3a8ca6d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c6d7734d6d813eb00e55c37f4be21149f90f52e3c3b764b1e7f140c02d00235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f721ff41c28a92f454b60651292d4f3f82c47e5d39e18f4917c0f6451bc88e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7949df7b03e7956207dfc8079e457f2455f3e3c0bffd84ac37df2df7962e095\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.255432 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.255468 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.255477 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.255492 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.255503 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.261592 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.273287 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.284101 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.298276 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-da
emon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.309093 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.319091 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"2026-01-28T18:30:22+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_cf730950-6cca-473d-b324-df7018131d0a\\\\n2026-01-28T18:30:22+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_cf730950-6cca-473d-b324-df7018131d0a to /host/opt/cni/bin/\\\\n2026-01-28T18:30:23Z [verbose] multus-daemon 
started\\\\n2026-01-28T18:30:23Z [verbose] Readiness Indicator file check\\\\n2026-01-28T18:31:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.336184 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:54Z\\\",\\\"message\\\":\\\"IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI0128 18:30:53.757696 6490 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI0128 18:30:53.758504 6490 services_controller.go:444] Built service openshift-kube-apiserver/apiserver LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI0128 18:30:53.757576 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.757788 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758519 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.758525 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758536 6490 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-hjjlv in node crc\\\\nF0128 18:30:53.758505 6490 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initia\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.345460 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.356818 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-
apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.358080 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.358112 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.358121 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.358137 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.358146 4767 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.367385 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.379815 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.389795 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 
18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.399315 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.409778 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.419930 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.430776 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.441739 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:09Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.460403 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.460440 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.460453 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.460471 4767 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.460483 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.562960 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.562989 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.562997 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.563010 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.563018 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.665464 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.665508 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.665518 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.665532 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.665542 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.770748 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.770796 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.770808 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.770824 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.770834 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.795333 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4"
Jan 28 18:31:09 crc kubenswrapper[4767]: E0128 18:31:09.795478 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.799386 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 09:49:14.786889929 +0000 UTC
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.873884 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.873920 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.873932 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.873950 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.873963 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.977133 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.977172 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.977183 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.977199 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:09 crc kubenswrapper[4767]: I0128 18:31:09.977242 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:09Z","lastTransitionTime":"2026-01-28T18:31:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.083042 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.083103 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.083118 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.083141 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.083161 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:10Z","lastTransitionTime":"2026-01-28T18:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.185846 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.185882 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.185890 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.185903 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.185912 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:10Z","lastTransitionTime":"2026-01-28T18:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.211830 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hjjlv_5a8e6ea7-4d55-4222-840b-c0383a9bc7da/kube-multus/0.log" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.211941 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hjjlv" event={"ID":"5a8e6ea7-4d55-4222-840b-c0383a9bc7da","Type":"ContainerStarted","Data":"64bb8fae529e99311f52941cdce4bc8b89a63b6c8a6bd18a21f0450a81d076cf"} Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.225133 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.243020 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.255775 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.267831 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.280869 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.291242 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.291916 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.292013 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.292096 4767 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.292174 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:10Z","lastTransitionTime":"2026-01-28T18:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.292099 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7b0efe8-6b8d-4aac-af64-ac8af3a8ca6d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c6d7734d6d813eb00e55c37f4be21149f90f52e3c3b764b1e7f140c02d00235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f721ff41c28a92f454b60651292d4f3f82c47e5d39e18f4917c0f6451bc88e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7949df7b03e7956207dfc8079e457f2455f3e3c0bffd84ac37df2df7962e095\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",
\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.314407 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c4
3f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.330345 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.342239 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.358903 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.371015 4767 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.388500 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64bb8fae529e99311f52941cdce4bc8b89a63b6c8a6bd18a21f0450a81d076cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"2026-01-28T18:30:22+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_cf730950-6cca-473d-b324-df7018131d0a\\\\n2026-01-28T18:30:22+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_cf730950-6cca-473d-b324-df7018131d0a to /host/opt/cni/bin/\\\\n2026-01-28T18:30:23Z [verbose] multus-daemon started\\\\n2026-01-28T18:30:23Z [verbose] Readiness Indicator file check\\\\n2026-01-28T18:31:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.393939 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.393992 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.394007 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.394031 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.394049 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:10Z","lastTransitionTime":"2026-01-28T18:31:10Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.409827 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/s
ecrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\
\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:54Z\\\",\\\"message\\\":\\\"IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI0128 18:30:53.757696 6490 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI0128 18:30:53.758504 6490 services_controller.go:444] Built service openshift-kube-apiserver/apiserver LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI0128 18:30:53.757576 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.757788 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758519 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.758525 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758536 6490 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-hjjlv in node crc\\\\nF0128 18:30:53.758505 6490 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller 
initia\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.419266 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.434080 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.443259 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.457730 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.470074 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:10Z is after 2025-08-24T17:21:41Z" Jan 28 
18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.497071    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.497152    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.497177    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.497238    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.497265    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:10Z","lastTransitionTime":"2026-01-28T18:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.600190    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.600253    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.600286    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.600306    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.600319    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:10Z","lastTransitionTime":"2026-01-28T18:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.701879    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.701911    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.701919    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.701932    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.701941    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:10Z","lastTransitionTime":"2026-01-28T18:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.795024    4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.795107    4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:31:10 crc kubenswrapper[4767]: E0128 18:31:10.795138    4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.795043    4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 18:31:10 crc kubenswrapper[4767]: E0128 18:31:10.795277    4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 18:31:10 crc kubenswrapper[4767]: E0128 18:31:10.795377    4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.799875    4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 19:08:06.95675124 +0000 UTC
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.803644    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.803675    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.803686    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.803701    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.803712    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:10Z","lastTransitionTime":"2026-01-28T18:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.906393    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.906426    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.906437    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.906455    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:10 crc kubenswrapper[4767]: I0128 18:31:10.906469    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:10Z","lastTransitionTime":"2026-01-28T18:31:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.008882    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.009176    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.009343    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.009477    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.009701    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:11Z","lastTransitionTime":"2026-01-28T18:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.111981    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.112317    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.112404    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.112494    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.112574    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:11Z","lastTransitionTime":"2026-01-28T18:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.214900    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.214945    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.214957    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.214979    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.214992    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:11Z","lastTransitionTime":"2026-01-28T18:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.320631    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.320674    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.320685    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.320700    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.320716    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:11Z","lastTransitionTime":"2026-01-28T18:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.422791    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.422861    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.422875    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.422892    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.423275    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:11Z","lastTransitionTime":"2026-01-28T18:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.525760    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.525784    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.525792    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.525805    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.525814    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:11Z","lastTransitionTime":"2026-01-28T18:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.627897    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.627932    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.627940    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.627954    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.627962    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:11Z","lastTransitionTime":"2026-01-28T18:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.729934    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.730177    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.730288    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.730398    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.730461    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:11Z","lastTransitionTime":"2026-01-28T18:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.794921    4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4"
Jan 28 18:31:11 crc kubenswrapper[4767]: E0128 18:31:11.795290    4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.800663    4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 05:46:38.139511747 +0000 UTC
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.832910    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.833343    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.833461    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.833555    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.833671    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:11Z","lastTransitionTime":"2026-01-28T18:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.935605    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.935868    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.935934    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.935999    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:11 crc kubenswrapper[4767]: I0128 18:31:11.936065    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:11Z","lastTransitionTime":"2026-01-28T18:31:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.038254    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.038516    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.038593    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.038661    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.038721    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:12Z","lastTransitionTime":"2026-01-28T18:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.140771    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.141035    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.141146    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.141264    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.141350    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:12Z","lastTransitionTime":"2026-01-28T18:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.244607    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.244657    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.244670    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.244687    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.245082    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:12Z","lastTransitionTime":"2026-01-28T18:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.347978    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.348013    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.348021    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.348034    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.348044    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:12Z","lastTransitionTime":"2026-01-28T18:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.450889    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.450960    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.450980    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.451006    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.451028    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:12Z","lastTransitionTime":"2026-01-28T18:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.553325    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.553368    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.553379    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.553391    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.553401    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:12Z","lastTransitionTime":"2026-01-28T18:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.655296    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.655382    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.655394    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.655409    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.655419    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:12Z","lastTransitionTime":"2026-01-28T18:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.757983    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.758051    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.758063    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.758083    4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.758095    4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:12Z","lastTransitionTime":"2026-01-28T18:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.794796    4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.794854    4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.794889    4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:31:12 crc kubenswrapper[4767]: E0128 18:31:12.794914    4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:31:12 crc kubenswrapper[4767]: E0128 18:31:12.795293 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:31:12 crc kubenswrapper[4767]: E0128 18:31:12.795412 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.801326 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-13 21:53:50.045484482 +0000 UTC Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.860790 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.860830 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.860843 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.860858 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.860867 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:12Z","lastTransitionTime":"2026-01-28T18:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.963619 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.963659 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.963673 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.963691 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:12 crc kubenswrapper[4767]: I0128 18:31:12.963704 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:12Z","lastTransitionTime":"2026-01-28T18:31:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.066735 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.066780 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.066788 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.066806 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.066814 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:13Z","lastTransitionTime":"2026-01-28T18:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.169572 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.169651 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.169664 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.169681 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.169720 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:13Z","lastTransitionTime":"2026-01-28T18:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.272383 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.272425 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.272433 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.272446 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.272458 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:13Z","lastTransitionTime":"2026-01-28T18:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.374329 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.374405 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.374428 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.374459 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.374481 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:13Z","lastTransitionTime":"2026-01-28T18:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.476596 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.476659 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.476675 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.476700 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.476716 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:13Z","lastTransitionTime":"2026-01-28T18:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.579885 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.579950 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.579972 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.580000 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.580023 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:13Z","lastTransitionTime":"2026-01-28T18:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.682857 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.682930 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.682954 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.682983 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.683005 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:13Z","lastTransitionTime":"2026-01-28T18:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.812519 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 05:07:13.623402842 +0000 UTC Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.813002 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:13 crc kubenswrapper[4767]: E0128 18:31:13.813549 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.814656 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.814730 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.814752 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.814779 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.814800 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:13Z","lastTransitionTime":"2026-01-28T18:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.917182 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.917253 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.917266 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.917283 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:13 crc kubenswrapper[4767]: I0128 18:31:13.917295 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:13Z","lastTransitionTime":"2026-01-28T18:31:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.020111 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.020174 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.020259 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.020286 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.020304 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:14Z","lastTransitionTime":"2026-01-28T18:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.122634 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.122671 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.122681 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.122696 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.122707 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:14Z","lastTransitionTime":"2026-01-28T18:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.225679 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.225727 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.225738 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.225756 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.225774 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:14Z","lastTransitionTime":"2026-01-28T18:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.328729 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.328782 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.328790 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.328803 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.328813 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:14Z","lastTransitionTime":"2026-01-28T18:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.430660 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.430718 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.430736 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.430762 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.430786 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:14Z","lastTransitionTime":"2026-01-28T18:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.533005 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.533040 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.533049 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.533061 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.533070 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:14Z","lastTransitionTime":"2026-01-28T18:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.636345 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.636404 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.636413 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.636431 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.636440 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:14Z","lastTransitionTime":"2026-01-28T18:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.739065 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.739128 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.739149 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.739172 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.739187 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:14Z","lastTransitionTime":"2026-01-28T18:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.795331 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:14 crc kubenswrapper[4767]: E0128 18:31:14.795479 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.795347 4767 util.go:30] "No sandbox for pod can be found. 
Jan 28 18:31:14 crc kubenswrapper[4767]: E0128 18:31:14.795706 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.795559 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:31:14 crc kubenswrapper[4767]: E0128 18:31:14.795805 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.812505 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f7b0efe8-6b8d-4aac-af64-ac8af3a8ca6d\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9c6d7734d6d813eb00e55c37f4be21149f90f52e3c3b764b1e7f140c02d00235\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f721ff41c28a92f454b60651292d4f3f82c47e5d39e18f4917c0f6451bc88e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://a7949df7b03e7956207dfc8079e457f2455f3e3c0bffd84ac37df2df7962e095\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d48d94311200e144e508768a8a350988b875a35fd8f3f3e907ace0e1717097ce\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.812731 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 11:05:22.631205268 +0000 UTC
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.835534 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b328212-fa4a-4241-8076-f3fa94f5e004\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8a11566b14ca201d10cc1895918c54bfd0f13e3604b417c61b3aa65091739b61\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3a6d0e9d4facbbd55d78fbd195f4b2a73e9050a75566d9879aee20063c3693a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b44f1bc0a1dd5287d845e97ce4752274374cb829771951236817d11c6ada5f8c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ac11e612a10add3903c3f25f252c31d9c8f43c43f3cdf942dfd86957d4bc4e32\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:00Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://3f9118b38831aad3cbbf9f4ef47d5c30b04516f9a5dab07a3b0a90ac80724b5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:59Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e038de27f676d1ef4175ff63eacc5767701a3b2e03ed08c76ee8257440f06c5f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7db5724a77b3850e603751dc79aecbeb4f0c738073e71f99e9f68529b99bf957\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5a2c61b2d054dce8795fd4d4713a114752bbb5216254eb1b89d386c19a6edfba\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:58Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.841197 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.841270 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.841280 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.841294 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.841305 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:14Z","lastTransitionTime":"2026-01-28T18:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.850614 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://94c34d9cc11983b9f65b91b2348cd0dcefc6e9e9cec49f92327fc62331b2747a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.866617 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.885882 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f729b63a-09d0-4095-add6-3e40fbd43e1c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e7ee6b53650b1776a047b095e3547d3e0cb8ecebfeb06cde6ae685d48c67db1d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-r6jtp\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-skvzp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.903499 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.915832 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-hjjlv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a8e6ea7-4d55-4222-840b-c0383a9bc7da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://64bb8fae529e99311f52941cdce4bc8b89a63b6c8a6bd18a21f0450a81d076cf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:31:09Z\\\",\\\"message\\\":\\\"2026-01-28T18:30:22+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_cf730950-6cca-473d-b324-df7018131d0a\\\\n2026-01-28T18:30:22+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_cf730950-6cca-473d-b324-df7018131d0a to /host/opt/cni/bin/\\\\n2026-01-28T18:30:23Z [verbose] multus-daemon started\\\\n2026-01-28T18:30:23Z [verbose] Readiness Indicator file check\\\\n2026-01-28T18:31:08Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:31:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gk46x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-hjjlv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z"
Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.932392 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready 
status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",
\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-s
ocket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-28T18:30:54Z\\\",\\\"message\\\":\\\"IPs:[]string(nil), V6IPs:[]string(nil)}, nodeEndpoints:map[string]services.lbEndpoints{}, externalTrafficLocal:false, internalTrafficLocal:false, hasNodePort:false}}\\\\nI0128 18:30:53.757696 6490 obj_retry.go:386] Retry successful for *v1.Pod openshift-kube-controller-manager/kube-controller-manager-crc after 0 failed attempt(s)\\\\nI0128 18:30:53.758504 6490 services_controller.go:444] Built service openshift-kube-apiserver/apiserver LB per-node configs for network=default: []services.lbConfig(nil)\\\\nI0128 18:30:53.757576 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.757788 6490 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758519 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-qbch4\\\\nI0128 18:30:53.758525 6490 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/multus-hjjlv\\\\nI0128 18:30:53.758536 6490 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-hjjlv in node crc\\\\nF0128 18:30:53.758505 6490 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initia\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:52Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-mwmbk_openshift-ovn-kubernetes(0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dzlbv\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-mwmbk\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.942277 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-28qmq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7537ad70-081f-465c-bead-4f4e288c8405\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5f42fb406bed972ab1ad75133504f9a3e354bd52c6318c2559ec0a0776e04343\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hwrf8\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:20Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-28qmq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.943980 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.944017 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.944031 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.944051 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.944064 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:14Z","lastTransitionTime":"2026-01-28T18:31:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.957268 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"99bd7dda-cedd-4898-920b-f77c5b0dd10e\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-28T18:30:16Z\\\",\\\"message\\\":\\\"falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0128 18:30:11.399918 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0128 18:30:11.400678 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-17235416/tls.crt::/tmp/serving-cert-17235416/tls.key\\\\\\\"\\\\nI0128 18:30:16.903923 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0128 18:30:16.907328 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0128 18:30:16.907388 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0128 18:30:16.907434 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0128 18:30:16.907459 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0128 18:30:16.920392 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0128 18:30:16.920433 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920439 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0128 18:30:16.920445 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0128 18:30:16.920449 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0128 18:30:16.920453 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0128 18:30:16.920456 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0128 18:30:16.920810 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0128 18:30:16.922486 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:01Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:58Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:29:56Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.969453 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-wcxcp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9f8067a8-acf1-4dcd-bf77-1e1dea881a39\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://342b5e7224148dea0f8b07bb9fe2f6cb8f4ddc7a1a31beb950d9e1fd6a8c4f1c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-zs9t5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-wcxcp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.983317 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-snvkm" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"688e15c3-4cd8-41ee-a2c4-f1b31bad4afe\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dc79d92ff79b1da97e3e4f4f5012549a6820d3c3b05a3a62321a8970074640c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://8b733dd2f8c763a5c10f5e05b3d0a3c5ba1cf38a013e6a71be16818f276afa3a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:20Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f670274c07382ad393bb85ce01357dc1e75d0d2ec1558c501497774773ba08e9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c200db919181a3f3458188adb844f456752a2b3c17c5f891218dc5cfd973ac4d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:23Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://48690263b9333ee21bcb8d48098a3982a714d3ded71be825add6ee02741bb3ad\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c20537b3ef1c743b8543a32f63daa1b4171c363b92d1db2aac287a2bc26c76b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://662979573e8f58121ed2cae098f3017f83443edbee78c97d31826257031babdf\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-28T18:30:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-28T18:30:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hgml5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:18Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-snvkm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:14 crc kubenswrapper[4767]: I0128 18:31:14.994722 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"03298ae7-d68a-454d-a610-7e92c936df1a\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://586511a76883ecd5b90b43e161f9ceddc2f5859463ccb010be07f1cd6fac531d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e75edfad2dce8633f1802b86d549fe20cbf1dee05b1b154d03f60a3b167d21f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bbssx\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:30Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-xxtqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:14Z is after 2025-08-24T17:21:41Z" Jan 28 
18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.009114 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"69c173a7-9963-46d3-a606-4e50d6657f88\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:57Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:29:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e9a1aea8ca72b1dbf448d3c1e9c7a904522c43f88b6d7e3a3e3c00f6116874f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:56Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://df67b82ffe7b55452f3d8e45968b46d12fa018c04b8d7921d9de4779c6e16535\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e2664cf85f81c533b067ffa1c6ba689145fc1991063c91957b0f4172a904fb2\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:29:57Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:29:54Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:15Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.021413 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:18Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:15Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.033386 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:19Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://54bc8472e1bade91936c7eb23aceb76bef9287f6522cb470df05d90262c37c6a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:19Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:15Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.045815 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://76eb1a8620b68a66ea1b35c0933ee166f5e6cdd87809dacc0efee9a85efcb267\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-28T18:30:21Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:15Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.051105 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.051178 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.051193 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.051241 4767 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.051254 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:15Z","lastTransitionTime":"2026-01-28T18:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.056019 4767 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-qbch4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T18:30:32Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2hnrq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T18:30:32Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-qbch4\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:15Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.154091 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.154161 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.154174 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.154196 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.154238 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:15Z","lastTransitionTime":"2026-01-28T18:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.256475 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.256508 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.256517 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.256529 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.256538 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:15Z","lastTransitionTime":"2026-01-28T18:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.358708 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.358739 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.358746 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.358781 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.358789 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:15Z","lastTransitionTime":"2026-01-28T18:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.461844 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.461893 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.461910 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.461931 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.461946 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:15Z","lastTransitionTime":"2026-01-28T18:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.564864 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.564897 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.564906 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.564919 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.564928 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:15Z","lastTransitionTime":"2026-01-28T18:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.668869 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.669506 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.669568 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.669604 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.669626 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:15Z","lastTransitionTime":"2026-01-28T18:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.772810 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.772880 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.772903 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.772931 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.772953 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:15Z","lastTransitionTime":"2026-01-28T18:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.794749 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:15 crc kubenswrapper[4767]: E0128 18:31:15.794915 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.813286 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 23:29:06.677380386 +0000 UTC Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.875579 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.875632 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.875648 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.875671 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.875690 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:15Z","lastTransitionTime":"2026-01-28T18:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.978060 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.978097 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.978105 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.978120 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:15 crc kubenswrapper[4767]: I0128 18:31:15.978129 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:15Z","lastTransitionTime":"2026-01-28T18:31:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.081367 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.081417 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.081432 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.081453 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.081468 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:16Z","lastTransitionTime":"2026-01-28T18:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.183415 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.183640 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.183648 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.183663 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.183672 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:16Z","lastTransitionTime":"2026-01-28T18:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.286002 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.286054 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.286072 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.286097 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.286115 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:16Z","lastTransitionTime":"2026-01-28T18:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.388300 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.388353 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.388369 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.388394 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.388411 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:16Z","lastTransitionTime":"2026-01-28T18:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.491403 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.491489 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.491506 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.491532 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.491551 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:16Z","lastTransitionTime":"2026-01-28T18:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.595255 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.595352 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.595372 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.595395 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.595443 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:16Z","lastTransitionTime":"2026-01-28T18:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.698733 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.699191 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.699424 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.699614 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.699755 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:16Z","lastTransitionTime":"2026-01-28T18:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.794840 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.794919 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:16 crc kubenswrapper[4767]: E0128 18:31:16.795081 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.795140 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:31:16 crc kubenswrapper[4767]: E0128 18:31:16.795354 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:31:16 crc kubenswrapper[4767]: E0128 18:31:16.795508 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.801399 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.801428 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.801439 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.801453 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.801463 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:16Z","lastTransitionTime":"2026-01-28T18:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.815331 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-30 00:31:43.666848204 +0000 UTC Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.904683 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.904774 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.904812 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.904839 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:16 crc kubenswrapper[4767]: I0128 18:31:16.904859 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:16Z","lastTransitionTime":"2026-01-28T18:31:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.007910 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.007949 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.007960 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.007978 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.007990 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:17Z","lastTransitionTime":"2026-01-28T18:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.110856 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.110941 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.110960 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.110986 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.111003 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:17Z","lastTransitionTime":"2026-01-28T18:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.214062 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.214476 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.215112 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.215481 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.215622 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:17Z","lastTransitionTime":"2026-01-28T18:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.318478 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.318515 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.318522 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.318536 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.318545 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:17Z","lastTransitionTime":"2026-01-28T18:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.422300 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.422345 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.422355 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.422372 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.422382 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:17Z","lastTransitionTime":"2026-01-28T18:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.525036 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.525406 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.525479 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.525547 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.525607 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:17Z","lastTransitionTime":"2026-01-28T18:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.627556 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.627593 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.627602 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.627617 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.627626 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:17Z","lastTransitionTime":"2026-01-28T18:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.730852 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.730892 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.730906 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.730928 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.730939 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:17Z","lastTransitionTime":"2026-01-28T18:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.794878 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:17 crc kubenswrapper[4767]: E0128 18:31:17.795020 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.816149 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 03:40:55.142388469 +0000 UTC Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.833516 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.833550 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.833560 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.833579 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.833588 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:17Z","lastTransitionTime":"2026-01-28T18:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.935811 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.936014 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.936099 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.936171 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:17 crc kubenswrapper[4767]: I0128 18:31:17.936259 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:17Z","lastTransitionTime":"2026-01-28T18:31:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.038684 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.038923 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.038985 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.039052 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.039123 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:18Z","lastTransitionTime":"2026-01-28T18:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.142464 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.142774 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.142841 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.142907 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.142968 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:18Z","lastTransitionTime":"2026-01-28T18:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.245663 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.245718 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.245735 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.245761 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.245779 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:18Z","lastTransitionTime":"2026-01-28T18:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.347787 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.347853 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.347866 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.347884 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.347923 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:18Z","lastTransitionTime":"2026-01-28T18:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.450597 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.450669 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.450694 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.450725 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.450741 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:18Z","lastTransitionTime":"2026-01-28T18:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.553124 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.553171 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.553183 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.553238 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.553263 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:18Z","lastTransitionTime":"2026-01-28T18:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.655240 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.655276 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.655289 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.655305 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.655316 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:18Z","lastTransitionTime":"2026-01-28T18:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.757917 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.758025 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.758043 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.758076 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.758096 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:18Z","lastTransitionTime":"2026-01-28T18:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.795496 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.795707 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:18 crc kubenswrapper[4767]: E0128 18:31:18.795820 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.795852 4767 util.go:30] "No sandbox for pod can be found. 
Jan 28 18:31:18 crc kubenswrapper[4767]: E0128 18:31:18.795985 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 28 18:31:18 crc kubenswrapper[4767]: E0128 18:31:18.796034 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.817013 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 08:10:45.497850334 +0000 UTC
Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.861809 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.861878 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.861896 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.861928 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.861942 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:18Z","lastTransitionTime":"2026-01-28T18:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
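[Editor's note] The certificate_manager record above is worth pausing on: the kubelet-serving certificate's rotation deadline (2025-12-24) is already behind the log clock (2026-01-28), and the failed node-status patches later in this section blame an expired certificate on the webhook at https://127.0.0.1:9743 ("x509: certificate has expired ... is after 2025-08-24T17:21:41Z"). A small probe of an endpoint's certificate validity window follows; it is a sketch, not cluster tooling, and it assumes the third-party 'cryptography' package (version 42+ for the *_utc accessors; older releases expose naive not_valid_before/not_valid_after instead).

#!/usr/bin/env python3
"""Print a TLS endpoint's certificate validity window, mirroring the
x509 'certificate has expired' failures reported later in this log."""
import datetime
import ssl
import sys

from cryptography import x509  # third-party: pip install cryptography

# Host is optional on the CLI; the port is the webhook port from the log.
host = sys.argv[1] if len(sys.argv) > 1 else "127.0.0.1"
port = 9743

# Fetch the peer certificate WITHOUT verifying it -- verification is the
# thing that is failing, so we want the raw validity window instead.
pem = ssl.get_server_certificate((host, port))
cert = x509.load_pem_x509_certificate(pem.encode())

now = datetime.datetime.now(datetime.timezone.utc)
print("notBefore:", cert.not_valid_before_utc)
print("notAfter: ", cert.not_valid_after_utc)
if not (cert.not_valid_before_utc <= now <= cert.not_valid_after_utc):
    sys.exit("certificate is outside its validity window (expired or not yet valid)")

Against the endpoint in this log, the script would report a notAfter of 2025-08-24T17:21:41Z and exit with the expiry message, matching the webhook failure the kubelet keeps hitting.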
Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.965522 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.965568 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.965579 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.965596 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:18 crc kubenswrapper[4767]: I0128 18:31:18.965607 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:18Z","lastTransitionTime":"2026-01-28T18:31:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.068283 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.068686 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.068874 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.069077 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.069315 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.171550 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.171809 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.172091 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.172307 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.172486 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.275943 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.276383 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.276586 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.276787 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.276963 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.385301 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.385644 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.385811 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.385911 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.385991 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.489420 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.489755 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.489847 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.489950 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.490039 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.538317 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.538645 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.539103 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.539511 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.539862 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:19 crc kubenswrapper[4767]: E0128 18:31:19.724735 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:19Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.728145 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.728175 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.728189 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.728224 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.728237 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:19 crc kubenswrapper[4767]: E0128 18:31:19.739348 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:19Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.744374 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.744441 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.744458 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.744491 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.744509 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:19 crc kubenswrapper[4767]: E0128 18:31:19.759436 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:19Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.763993 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.764042 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.764055 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.764078 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.764093 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:19 crc kubenswrapper[4767]: E0128 18:31:19.776489 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:19Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.779066 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.779086 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.779093 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.779107 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.779118 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:19 crc kubenswrapper[4767]: E0128 18:31:19.788948 4767 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-28T18:31:19Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"682eb627-cced-4633-9793-d2848d7cb55d\\\",\\\"systemUUID\\\":\\\"00f6b3bc-7fae-41f7-8bb1-b999bfbcd048\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-28T18:31:19Z is after 2025-08-24T17:21:41Z" Jan 28 18:31:19 crc kubenswrapper[4767]: E0128 18:31:19.789094 4767 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.790149 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.790166 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.790173 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.790183 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.790192 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.805997 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:19 crc kubenswrapper[4767]: E0128 18:31:19.806173 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.817145 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 16:03:39.393200535 +0000 UTC Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.892991 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.893034 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.893045 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.893066 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.893077 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.996237 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.996279 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.996288 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.996305 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:19 crc kubenswrapper[4767]: I0128 18:31:19.996315 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:19Z","lastTransitionTime":"2026-01-28T18:31:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.099627 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.099888 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.099954 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.100015 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.100079 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:20Z","lastTransitionTime":"2026-01-28T18:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.203314 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.203603 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.203698 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.203795 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.203878 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:20Z","lastTransitionTime":"2026-01-28T18:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.306340 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.306615 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.306689 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.306765 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.306830 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:20Z","lastTransitionTime":"2026-01-28T18:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.410050 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.410427 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.410577 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.410705 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.410900 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:20Z","lastTransitionTime":"2026-01-28T18:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.514530 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.514577 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.514593 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.514616 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.514633 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:20Z","lastTransitionTime":"2026-01-28T18:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.619129 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.619174 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.619185 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.619200 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.619225 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:20Z","lastTransitionTime":"2026-01-28T18:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.722524 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.722605 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.722634 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.722664 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.722686 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:20Z","lastTransitionTime":"2026-01-28T18:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.796947 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.797329 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.797330 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:20 crc kubenswrapper[4767]: E0128 18:31:20.797731 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:31:20 crc kubenswrapper[4767]: E0128 18:31:20.797921 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:31:20 crc kubenswrapper[4767]: E0128 18:31:20.798333 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.811624 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.818649 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-03 21:21:45.420374014 +0000 UTC Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.824630 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.824657 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.824665 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.824693 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.824705 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:20Z","lastTransitionTime":"2026-01-28T18:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.927382 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.927740 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.927811 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.927874 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:20 crc kubenswrapper[4767]: I0128 18:31:20.927936 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:20Z","lastTransitionTime":"2026-01-28T18:31:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.033509 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.033596 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.033632 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.033662 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.033682 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:21Z","lastTransitionTime":"2026-01-28T18:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.137620 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.137686 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.137710 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.137742 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.137768 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:21Z","lastTransitionTime":"2026-01-28T18:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.239934 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.240257 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.240394 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.240544 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.240758 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:21Z","lastTransitionTime":"2026-01-28T18:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.342987 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.343043 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.343060 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.343086 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.343103 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:21Z","lastTransitionTime":"2026-01-28T18:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.446629 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.446694 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.446713 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.446737 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.446754 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:21Z","lastTransitionTime":"2026-01-28T18:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.549519 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.549597 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.549623 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.549653 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.549672 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:21Z","lastTransitionTime":"2026-01-28T18:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.652768 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.652827 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.652845 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.652869 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.652887 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:21Z","lastTransitionTime":"2026-01-28T18:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.756036 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.756090 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.756101 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.756119 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.756129 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:21Z","lastTransitionTime":"2026-01-28T18:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.795367 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:21 crc kubenswrapper[4767]: E0128 18:31:21.795535 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.796393 4767 scope.go:117] "RemoveContainer" containerID="250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.818807 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 23:27:51.3803838 +0000 UTC Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.859234 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.859271 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.859282 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.859298 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.859309 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:21Z","lastTransitionTime":"2026-01-28T18:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.961913 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.961967 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.961984 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.962015 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:21 crc kubenswrapper[4767]: I0128 18:31:21.962033 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:21Z","lastTransitionTime":"2026-01-28T18:31:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.064609 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.064664 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.064679 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.064705 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.064722 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:22Z","lastTransitionTime":"2026-01-28T18:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.167986 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.168042 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.168061 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.168082 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.168099 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:22Z","lastTransitionTime":"2026-01-28T18:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.271128 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.271189 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.271233 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.271258 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.271276 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:22Z","lastTransitionTime":"2026-01-28T18:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.374546 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.374584 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.374600 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.374622 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.374638 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:22Z","lastTransitionTime":"2026-01-28T18:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.477524 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.477573 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.477585 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.477602 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.477613 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:22Z","lastTransitionTime":"2026-01-28T18:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.580520 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.580571 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.580586 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.580603 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.580617 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:22Z","lastTransitionTime":"2026-01-28T18:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.683026 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.683055 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.683065 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.683079 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.683088 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:22Z","lastTransitionTime":"2026-01-28T18:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.747857 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.747958 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.748005 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:32:26.74797865 +0000 UTC m=+152.712161524 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.748061 4767 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.748091 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.748141 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:32:26.748126715 +0000 UTC m=+152.712309589 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.748294 4767 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.748363 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-28 18:32:26.748351242 +0000 UTC m=+152.712534206 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.785297 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.785333 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.785342 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.785359 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.785369 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:22Z","lastTransitionTime":"2026-01-28T18:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.794554 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.794608 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.794673 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.794627 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.794733 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.794840 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.819642 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 09:20:13.907182483 +0000 UTC Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.849463 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.849519 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.849658 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.849686 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.849700 4767 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.849749 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-28 18:32:26.849733955 +0000 UTC m=+152.813916829 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.849658 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.849801 4767 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.849813 4767 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:31:22 crc kubenswrapper[4767]: E0128 18:31:22.849845 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-28 18:32:26.849834758 +0000 UTC m=+152.814017632 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.888007 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.888087 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.888097 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.888113 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.888124 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:22Z","lastTransitionTime":"2026-01-28T18:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.990515 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.990552 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.990563 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.990577 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:22 crc kubenswrapper[4767]: I0128 18:31:22.990587 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:22Z","lastTransitionTime":"2026-01-28T18:31:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.092624 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.092662 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.092672 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.092686 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.092695 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:23Z","lastTransitionTime":"2026-01-28T18:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.194964 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.195008 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.195019 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.195037 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.195046 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:23Z","lastTransitionTime":"2026-01-28T18:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.253628 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovnkube-controller/2.log" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.255275 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerStarted","Data":"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"} Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.255889 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.288134 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-snvkm" podStartSLOduration=65.288112517 podStartE2EDuration="1m5.288112517s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:23.287992563 +0000 UTC m=+89.252175457" watchObservedRunningTime="2026-01-28 18:31:23.288112517 +0000 UTC m=+89.252295401" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.296927 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.296972 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.296984 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.297010 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.297023 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:23Z","lastTransitionTime":"2026-01-28T18:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.301878 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-xxtqp" podStartSLOduration=65.301861477 podStartE2EDuration="1m5.301861477s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:23.301731662 +0000 UTC m=+89.265914536" watchObservedRunningTime="2026-01-28 18:31:23.301861477 +0000 UTC m=+89.266044351" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.321125 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=59.321095223 podStartE2EDuration="59.321095223s" podCreationTimestamp="2026-01-28 18:30:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:23.320535915 +0000 UTC m=+89.284718819" watchObservedRunningTime="2026-01-28 18:31:23.321095223 +0000 UTC m=+89.285278097" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.336784 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-wcxcp" podStartSLOduration=67.336760573 podStartE2EDuration="1m7.336760573s" podCreationTimestamp="2026-01-28 18:30:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:23.336672641 +0000 UTC m=+89.300855525" watchObservedRunningTime="2026-01-28 18:31:23.336760573 +0000 UTC m=+89.300943447" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.399954 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.400030 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.400046 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.400066 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.400081 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:23Z","lastTransitionTime":"2026-01-28T18:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.412758 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=3.412727184 podStartE2EDuration="3.412727184s" podCreationTimestamp="2026-01-28 18:31:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:23.412180516 +0000 UTC m=+89.376363400" watchObservedRunningTime="2026-01-28 18:31:23.412727184 +0000 UTC m=+89.376910058" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.458562 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=61.458529268 podStartE2EDuration="1m1.458529268s" podCreationTimestamp="2026-01-28 18:30:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:23.434939724 +0000 UTC m=+89.399122598" watchObservedRunningTime="2026-01-28 18:31:23.458529268 +0000 UTC m=+89.422712142" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.503466 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.503507 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.503517 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.503538 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.503551 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:23Z","lastTransitionTime":"2026-01-28T18:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.510572 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podStartSLOduration=66.510550962 podStartE2EDuration="1m6.510550962s" podCreationTimestamp="2026-01-28 18:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:23.504777248 +0000 UTC m=+89.468960132" watchObservedRunningTime="2026-01-28 18:31:23.510550962 +0000 UTC m=+89.474733836" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.510789 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-qbch4"] Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.510914 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:23 crc kubenswrapper[4767]: E0128 18:31:23.511016 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.523589 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=35.523565928 podStartE2EDuration="35.523565928s" podCreationTimestamp="2026-01-28 18:30:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:23.522940498 +0000 UTC m=+89.487123372" watchObservedRunningTime="2026-01-28 18:31:23.523565928 +0000 UTC m=+89.487748802" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.576693 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=65.576671317 podStartE2EDuration="1m5.576671317s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:23.553483286 +0000 UTC m=+89.517666160" watchObservedRunningTime="2026-01-28 18:31:23.576671317 +0000 UTC m=+89.540854191" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.577057 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podStartSLOduration=65.577050129 podStartE2EDuration="1m5.577050129s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:23.576370687 +0000 UTC m=+89.540553571" watchObservedRunningTime="2026-01-28 18:31:23.577050129 +0000 UTC m=+89.541233003" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.589993 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-28qmq" podStartSLOduration=66.589968172 podStartE2EDuration="1m6.589968172s" podCreationTimestamp="2026-01-28 18:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:23.58925382 +0000 UTC m=+89.553436704" watchObservedRunningTime="2026-01-28 18:31:23.589968172 +0000 UTC m=+89.554151046" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.606486 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.606776 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.606841 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.606930 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 
18:31:23.606999 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:23Z","lastTransitionTime":"2026-01-28T18:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.623367 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-hjjlv" podStartSLOduration=66.623342361 podStartE2EDuration="1m6.623342361s" podCreationTimestamp="2026-01-28 18:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:23.622010188 +0000 UTC m=+89.586193062" watchObservedRunningTime="2026-01-28 18:31:23.623342361 +0000 UTC m=+89.587525235" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.709541 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.709577 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.709587 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.709603 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.709615 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:23Z","lastTransitionTime":"2026-01-28T18:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.812837 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.812889 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.812904 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.812931 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.812953 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:23Z","lastTransitionTime":"2026-01-28T18:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.820278 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 15:26:50.824804169 +0000 UTC Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.918587 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.918636 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.918650 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.918675 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:23 crc kubenswrapper[4767]: I0128 18:31:23.918692 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:23Z","lastTransitionTime":"2026-01-28T18:31:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.021624 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.021970 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.021980 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.021996 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.022010 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:24Z","lastTransitionTime":"2026-01-28T18:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.123928 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.123958 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.123968 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.123982 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.123994 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:24Z","lastTransitionTime":"2026-01-28T18:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.227265 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.227304 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.227314 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.227330 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.227340 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:24Z","lastTransitionTime":"2026-01-28T18:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.329996 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.330042 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.330055 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.330100 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.330114 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:24Z","lastTransitionTime":"2026-01-28T18:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.432441 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.432494 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.432503 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.432518 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.432529 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:24Z","lastTransitionTime":"2026-01-28T18:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.535379 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.535453 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.535473 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.535495 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.535573 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:24Z","lastTransitionTime":"2026-01-28T18:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.638605 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.638645 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.638657 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.638677 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.638691 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:24Z","lastTransitionTime":"2026-01-28T18:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.746159 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.746271 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.746291 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.746315 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.746330 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:24Z","lastTransitionTime":"2026-01-28T18:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.795296 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.795345 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.795353 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.795368 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:31:24 crc kubenswrapper[4767]: E0128 18:31:24.796258 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-qbch4" podUID="0dc01d59-d401-4c7c-9eec-0a67aa5261fc" Jan 28 18:31:24 crc kubenswrapper[4767]: E0128 18:31:24.796440 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 28 18:31:24 crc kubenswrapper[4767]: E0128 18:31:24.796547 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 28 18:31:24 crc kubenswrapper[4767]: E0128 18:31:24.796650 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.820980 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 20:51:25.908671646 +0000 UTC Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.848575 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.848607 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.848615 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.848630 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.848642 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:24Z","lastTransitionTime":"2026-01-28T18:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.950675 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.950701 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.950709 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.950721 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:24 crc kubenswrapper[4767]: I0128 18:31:24.950730 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:24Z","lastTransitionTime":"2026-01-28T18:31:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.053591 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.053627 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.053636 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.053650 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.053659 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:25Z","lastTransitionTime":"2026-01-28T18:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.155939 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.155984 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.156000 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.156024 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.156042 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:25Z","lastTransitionTime":"2026-01-28T18:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.259130 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.259182 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.259197 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.259241 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.259260 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:25Z","lastTransitionTime":"2026-01-28T18:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.363665 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.363737 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.363761 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.363794 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.363812 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:25Z","lastTransitionTime":"2026-01-28T18:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.467564 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.467617 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.467636 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.467661 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.467679 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:25Z","lastTransitionTime":"2026-01-28T18:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.570341 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.570394 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.570407 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.570428 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.570440 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:25Z","lastTransitionTime":"2026-01-28T18:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.674112 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.674166 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.674179 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.674201 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.674252 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:25Z","lastTransitionTime":"2026-01-28T18:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.776935 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.776984 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.776995 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.777013 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.777027 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:25Z","lastTransitionTime":"2026-01-28T18:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.821668 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 19:25:39.767279934 +0000 UTC
Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.880126 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.880192 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.880247 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.880300 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.880323 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:25Z","lastTransitionTime":"2026-01-28T18:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.982776 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.982838 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.982849 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.982867 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:25 crc kubenswrapper[4767]: I0128 18:31:25.982878 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:25Z","lastTransitionTime":"2026-01-28T18:31:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.085925 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.085963 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.085974 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.085990 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.086003 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:26Z","lastTransitionTime":"2026-01-28T18:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.189621 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.189698 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.189716 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.189743 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.189767 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:26Z","lastTransitionTime":"2026-01-28T18:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.293592 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.293696 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.293726 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.293764 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.293792 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:26Z","lastTransitionTime":"2026-01-28T18:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.397141 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.397266 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.397291 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.397319 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.397338 4767 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-28T18:31:26Z","lastTransitionTime":"2026-01-28T18:31:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.500712 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.500762 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.500778 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.500799 4767 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.500902 4767 kubelet_node_status.go:538] "Fast updating node status as it just became ready"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.554974 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vqr6q"]
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.556152 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.557469 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4"]
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.558304 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.559681 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.561293 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-bmcrg"]
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.562647 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.563429 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.567815 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-24jzn"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.568499 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.575640 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.576472 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.577073 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.577234 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.577934 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-vc67h"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.578734 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.581100 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.582736 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.582984 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.583315 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.583360 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pbbt4"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.584102 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.585613 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.585884 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.586604 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.586942 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.587280 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.587504 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.587719 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.589877 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.590235 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.590447 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.606703 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-etcd-serving-ca\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.606761 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0c90af26-b1fa-4faf-bbba-903dc47f7a46-etcd-client\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.606804 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c9ceeaa1-d900-4c38-89ef-31abebe17be9-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.606837 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85c09fc3-36bb-43e9-a1a1-16eeab18b96f-config\") pod \"machine-approver-56656f9798-cwpk4\" (UID: \"85c09fc3-36bb-43e9-a1a1-16eeab18b96f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" Jan 28 
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.606866 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/24a3b544-3082-45d9-956f-1540b9725ea2-images\") pod \"machine-api-operator-5694c8668f-vqr6q\" (UID: \"24a3b544-3082-45d9-956f-1540b9725ea2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.606891 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qwqg\" (UniqueName: \"kubernetes.io/projected/85c09fc3-36bb-43e9-a1a1-16eeab18b96f-kube-api-access-8qwqg\") pod \"machine-approver-56656f9798-cwpk4\" (UID: \"85c09fc3-36bb-43e9-a1a1-16eeab18b96f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.606922 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c9ceeaa1-d900-4c38-89ef-31abebe17be9-audit-policies\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.606955 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/0c90af26-b1fa-4faf-bbba-903dc47f7a46-node-pullsecrets\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.606984 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/85c09fc3-36bb-43e9-a1a1-16eeab18b96f-machine-approver-tls\") pod \"machine-approver-56656f9798-cwpk4\" (UID: \"85c09fc3-36bb-43e9-a1a1-16eeab18b96f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607010 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-trusted-ca-bundle\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607031 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c9ceeaa1-d900-4c38-89ef-31abebe17be9-audit-dir\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607059 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-config\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607087 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cdlw\" (UniqueName: \"kubernetes.io/projected/24a3b544-3082-45d9-956f-1540b9725ea2-kube-api-access-7cdlw\") pod \"machine-api-operator-5694c8668f-vqr6q\" (UID: \"24a3b544-3082-45d9-956f-1540b9725ea2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607112 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9ceeaa1-d900-4c38-89ef-31abebe17be9-serving-cert\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607151 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-image-import-ca\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607192 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/24a3b544-3082-45d9-956f-1540b9725ea2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vqr6q\" (UID: \"24a3b544-3082-45d9-956f-1540b9725ea2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607251 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c9ceeaa1-d900-4c38-89ef-31abebe17be9-etcd-client\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607280 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/85c09fc3-36bb-43e9-a1a1-16eeab18b96f-auth-proxy-config\") pod \"machine-approver-56656f9798-cwpk4\" (UID: \"85c09fc3-36bb-43e9-a1a1-16eeab18b96f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607307 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zzsxt\" (UniqueName: \"kubernetes.io/projected/0c90af26-b1fa-4faf-bbba-903dc47f7a46-kube-api-access-zzsxt\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607331 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0c90af26-b1fa-4faf-bbba-903dc47f7a46-encryption-config\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607388 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24a3b544-3082-45d9-956f-1540b9725ea2-config\") pod \"machine-api-operator-5694c8668f-vqr6q\" (UID: \"24a3b544-3082-45d9-956f-1540b9725ea2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607415 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c9ceeaa1-d900-4c38-89ef-31abebe17be9-encryption-config\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607439 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c90af26-b1fa-4faf-bbba-903dc47f7a46-serving-cert\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607465 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n4jjp\" (UniqueName: \"kubernetes.io/projected/c9ceeaa1-d900-4c38-89ef-31abebe17be9-kube-api-access-n4jjp\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607485 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-audit\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607513 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0c90af26-b1fa-4faf-bbba-903dc47f7a46-audit-dir\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.607539 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c9ceeaa1-d900-4c38-89ef-31abebe17be9-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.608924 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.608953 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.609415 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.609621 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert"
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.609960 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Jan 28
18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.610155 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.610882 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.611398 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.611605 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.612555 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.612836 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.613436 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.613487 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.614008 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.614031 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.614044 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.618274 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.618417 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619188 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619237 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619346 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619435 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619522 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619574 4767 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619689 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619732 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619799 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619838 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619934 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619541 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.620178 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.619689 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.620550 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.620852 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.620994 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.621154 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.628535 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.631165 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.632272 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.632753 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.633056 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.633415 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.633455 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.633573 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-b7z9l"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.633781 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.634028 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.635273 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.635675 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.639541 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.639924 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.640401 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.642932 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.643713 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.644158 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.644390 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.644539 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.644597 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.644718 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.644763 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.644844 4767 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.645015 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.645227 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.645424 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.645533 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-qfdzz"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.645558 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.645670 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.645800 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.645935 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.646053 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.646112 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.646182 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.646361 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.648925 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.658930 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.662083 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.670947 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.692192 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.693588 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.693675 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.693812 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.694982 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.695058 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.695129 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.695193 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.695415 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.696504 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.696714 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.696999 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 28 18:31:26 crc 
kubenswrapper[4767]: I0128 18:31:26.697097 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-n85dl"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.697112 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.697190 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.697458 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.697698 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-4svb7"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.697994 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-4svb7" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.698042 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.698191 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.698353 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.698433 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.698563 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.698622 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.698946 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.706270 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.706380 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.706513 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.706525 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.706567 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.706270 4767 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708407 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c9ceeaa1-d900-4c38-89ef-31abebe17be9-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708441 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708460 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-config\") pod \"route-controller-manager-6576b87f9c-vx7rs\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708480 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kvdn8\" (UniqueName: \"kubernetes.io/projected/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-kube-api-access-kvdn8\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708497 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0c90af26-b1fa-4faf-bbba-903dc47f7a46-etcd-client\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708513 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-etcd-serving-ca\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708530 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708546 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c9ceeaa1-d900-4c38-89ef-31abebe17be9-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708560 
4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85c09fc3-36bb-43e9-a1a1-16eeab18b96f-config\") pod \"machine-approver-56656f9798-cwpk4\" (UID: \"85c09fc3-36bb-43e9-a1a1-16eeab18b96f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708575 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-audit-policies\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708590 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hlgw\" (UniqueName: \"kubernetes.io/projected/1ac82155-1d09-4371-869a-e7edb9c4d5bc-kube-api-access-7hlgw\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708612 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-serving-cert\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708626 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708643 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/24a3b544-3082-45d9-956f-1540b9725ea2-images\") pod \"machine-api-operator-5694c8668f-vqr6q\" (UID: \"24a3b544-3082-45d9-956f-1540b9725ea2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708661 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qwqg\" (UniqueName: \"kubernetes.io/projected/85c09fc3-36bb-43e9-a1a1-16eeab18b96f-kube-api-access-8qwqg\") pod \"machine-approver-56656f9798-cwpk4\" (UID: \"85c09fc3-36bb-43e9-a1a1-16eeab18b96f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708681 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-trusted-ca-bundle\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708697 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/302a671f-1c82-402b-9450-d27a1566dc3f-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgjcd\" (UID: \"302a671f-1c82-402b-9450-d27a1566dc3f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708712 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-config\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708727 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72d10cb6-6739-48fc-a732-8e8981bf80c9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-lq7cv\" (UID: \"72d10cb6-6739-48fc-a732-8e8981bf80c9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708743 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c4z68\" (UniqueName: \"kubernetes.io/projected/72d10cb6-6739-48fc-a732-8e8981bf80c9-kube-api-access-c4z68\") pod \"openshift-apiserver-operator-796bbdcf4f-lq7cv\" (UID: \"72d10cb6-6739-48fc-a732-8e8981bf80c9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708759 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-client-ca\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708774 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708790 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708816 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-service-ca\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708830 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-serving-cert\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708846 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c9ceeaa1-d900-4c38-89ef-31abebe17be9-audit-policies\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708863 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b2rv8\" (UID: \"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708881 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/0c90af26-b1fa-4faf-bbba-903dc47f7a46-node-pullsecrets\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708898 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65d6565f-d0b8-441e-8f17-293368650b57-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708915 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-config\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708934 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-serving-cert\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708953 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/85c09fc3-36bb-43e9-a1a1-16eeab18b96f-machine-approver-tls\") pod \"machine-approver-56656f9798-cwpk4\" (UID: \"85c09fc3-36bb-43e9-a1a1-16eeab18b96f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708974 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.708989 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/65d6565f-d0b8-441e-8f17-293368650b57-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709006 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-trusted-ca-bundle\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709022 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b2rv8\" (UID: \"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709036 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709052 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c9ceeaa1-d900-4c38-89ef-31abebe17be9-audit-dir\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709068 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-config\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709084 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/302a671f-1c82-402b-9450-d27a1566dc3f-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgjcd\" (UID: \"302a671f-1c82-402b-9450-d27a1566dc3f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709099 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cdlw\" (UniqueName: \"kubernetes.io/projected/24a3b544-3082-45d9-956f-1540b9725ea2-kube-api-access-7cdlw\") pod 
\"machine-api-operator-5694c8668f-vqr6q\" (UID: \"24a3b544-3082-45d9-956f-1540b9725ea2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709115 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9ceeaa1-d900-4c38-89ef-31abebe17be9-serving-cert\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709131 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-config\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709156 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-image-import-ca\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709173 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-client-ca\") pod \"route-controller-manager-6576b87f9c-vx7rs\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709188 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glvgt\" (UniqueName: \"kubernetes.io/projected/9414071e-c859-4b9b-a1b2-51026b6886d7-kube-api-access-glvgt\") pod \"console-operator-58897d9998-b7z9l\" (UID: \"9414071e-c859-4b9b-a1b2-51026b6886d7\") " pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709220 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709237 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-oauth-config\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709250 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-oauth-serving-cert\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 
crc kubenswrapper[4767]: I0128 18:31:26.709272 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/65d6565f-d0b8-441e-8f17-293368650b57-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709286 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a5c87c48-f06a-4f35-a336-2d74a88c40ac-audit-dir\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709302 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709319 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c9ceeaa1-d900-4c38-89ef-31abebe17be9-etcd-client\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709335 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4kth\" (UniqueName: \"kubernetes.io/projected/a5c87c48-f06a-4f35-a336-2d74a88c40ac-kube-api-access-z4kth\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709352 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/24a3b544-3082-45d9-956f-1540b9725ea2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vqr6q\" (UID: \"24a3b544-3082-45d9-956f-1540b9725ea2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709371 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709386 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 
18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709403 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/85c09fc3-36bb-43e9-a1a1-16eeab18b96f-auth-proxy-config\") pod \"machine-approver-56656f9798-cwpk4\" (UID: \"85c09fc3-36bb-43e9-a1a1-16eeab18b96f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709419 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b2rv8\" (UID: \"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709435 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709450 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9414071e-c859-4b9b-a1b2-51026b6886d7-trusted-ca\") pod \"console-operator-58897d9998-b7z9l\" (UID: \"9414071e-c859-4b9b-a1b2-51026b6886d7\") " pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709467 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zzsxt\" (UniqueName: \"kubernetes.io/projected/0c90af26-b1fa-4faf-bbba-903dc47f7a46-kube-api-access-zzsxt\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709482 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pm885\" (UniqueName: \"kubernetes.io/projected/302a671f-1c82-402b-9450-d27a1566dc3f-kube-api-access-pm885\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgjcd\" (UID: \"302a671f-1c82-402b-9450-d27a1566dc3f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709498 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72d10cb6-6739-48fc-a732-8e8981bf80c9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-lq7cv\" (UID: \"72d10cb6-6739-48fc-a732-8e8981bf80c9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709513 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: 
\"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709529 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zb6k2\" (UniqueName: \"kubernetes.io/projected/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-kube-api-access-zb6k2\") pod \"route-controller-manager-6576b87f9c-vx7rs\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709544 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/65d6565f-d0b8-441e-8f17-293368650b57-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709561 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9414071e-c859-4b9b-a1b2-51026b6886d7-serving-cert\") pod \"console-operator-58897d9998-b7z9l\" (UID: \"9414071e-c859-4b9b-a1b2-51026b6886d7\") " pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709576 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/65d6565f-d0b8-441e-8f17-293368650b57-service-ca\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709593 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0c90af26-b1fa-4faf-bbba-903dc47f7a46-encryption-config\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709607 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-serving-cert\") pod \"route-controller-manager-6576b87f9c-vx7rs\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709635 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24a3b544-3082-45d9-956f-1540b9725ea2-config\") pod \"machine-api-operator-5694c8668f-vqr6q\" (UID: \"24a3b544-3082-45d9-956f-1540b9725ea2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709649 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c9ceeaa1-d900-4c38-89ef-31abebe17be9-encryption-config\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" 
Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709665 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c90af26-b1fa-4faf-bbba-903dc47f7a46-serving-cert\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709680 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9414071e-c859-4b9b-a1b2-51026b6886d7-config\") pod \"console-operator-58897d9998-b7z9l\" (UID: \"9414071e-c859-4b9b-a1b2-51026b6886d7\") " pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709695 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-service-ca-bundle\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709712 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n4jjp\" (UniqueName: \"kubernetes.io/projected/c9ceeaa1-d900-4c38-89ef-31abebe17be9-kube-api-access-n4jjp\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709726 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-audit\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709741 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qpb5\" (UniqueName: \"kubernetes.io/projected/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-kube-api-access-4qpb5\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709756 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7sxqg\" (UniqueName: \"kubernetes.io/projected/a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc-kube-api-access-7sxqg\") pod \"cluster-image-registry-operator-dc59b4c8b-b2rv8\" (UID: \"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709770 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0c90af26-b1fa-4faf-bbba-903dc47f7a46-audit-dir\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.709822 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: 
\"kubernetes.io/host-path/0c90af26-b1fa-4faf-bbba-903dc47f7a46-audit-dir\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.710684 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/85c09fc3-36bb-43e9-a1a1-16eeab18b96f-config\") pod \"machine-approver-56656f9798-cwpk4\" (UID: \"85c09fc3-36bb-43e9-a1a1-16eeab18b96f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.711079 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.711377 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/24a3b544-3082-45d9-956f-1540b9725ea2-images\") pod \"machine-api-operator-5694c8668f-vqr6q\" (UID: \"24a3b544-3082-45d9-956f-1540b9725ea2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.711552 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24a3b544-3082-45d9-956f-1540b9725ea2-config\") pod \"machine-api-operator-5694c8668f-vqr6q\" (UID: \"24a3b544-3082-45d9-956f-1540b9725ea2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.711709 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-v7787"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.711985 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/85c09fc3-36bb-43e9-a1a1-16eeab18b96f-auth-proxy-config\") pod \"machine-approver-56656f9798-cwpk4\" (UID: \"85c09fc3-36bb-43e9-a1a1-16eeab18b96f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.712053 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c9ceeaa1-d900-4c38-89ef-31abebe17be9-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.712245 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.712516 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.712868 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-kx6dk"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.712894 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/0c90af26-b1fa-4faf-bbba-903dc47f7a46-node-pullsecrets\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.713122 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c9ceeaa1-d900-4c38-89ef-31abebe17be9-audit-policies\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.713189 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c9ceeaa1-d900-4c38-89ef-31abebe17be9-audit-dir\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.713702 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-kx6dk" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.716791 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-b2scv"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.716929 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-etcd-serving-ca\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.717301 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.717996 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c9ceeaa1-d900-4c38-89ef-31abebe17be9-etcd-client\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.718229 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-audit\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.718446 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-ttsxj"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.718867 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.719876 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/0c90af26-b1fa-4faf-bbba-903dc47f7a46-encryption-config\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.720759 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-config\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.721606 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-trusted-ca-bundle\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.721721 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/0c90af26-b1fa-4faf-bbba-903dc47f7a46-image-import-ca\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.721882 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c9ceeaa1-d900-4c38-89ef-31abebe17be9-encryption-config\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.722408 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/24a3b544-3082-45d9-956f-1540b9725ea2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-vqr6q\" (UID: \"24a3b544-3082-45d9-956f-1540b9725ea2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.722578 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/85c09fc3-36bb-43e9-a1a1-16eeab18b96f-machine-approver-tls\") pod \"machine-approver-56656f9798-cwpk4\" (UID: \"85c09fc3-36bb-43e9-a1a1-16eeab18b96f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.722726 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c9ceeaa1-d900-4c38-89ef-31abebe17be9-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.722861 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/0c90af26-b1fa-4faf-bbba-903dc47f7a46-etcd-client\") pod \"apiserver-76f77b778f-bmcrg\" (UID: 
\"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.724524 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-77vtc"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.724799 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.724944 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c90af26-b1fa-4faf-bbba-903dc47f7a46-serving-cert\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.725133 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.725307 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9ceeaa1-d900-4c38-89ef-31abebe17be9-serving-cert\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.726740 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.728383 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.728993 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.729816 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.733621 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.734167 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-vsxcm"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.734621 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.735003 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.735818 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vsxcm" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.736357 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.737799 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-q6cnp"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.738315 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.738480 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-q6cnp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.746849 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.747828 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.749698 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-rll48"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.750622 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.750949 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-rr2mf"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.751614 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.753742 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vqr6q"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.757734 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.761229 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.766856 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-58t9w"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.767129 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.769341 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.769416 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gxsvp"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.774052 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.774126 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.775354 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.775684 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.775746 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-5h8dc"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.775982 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.777020 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.777188 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.778461 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.778647 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-g62xj"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.778941 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.779246 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-g62xj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.781271 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-vc67h"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.782865 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.784861 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.786239 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-4svb7"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.788076 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.789217 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-24jzn"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.790174 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.791420 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-kx6dk"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.792778 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.793983 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-bmcrg"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.794838 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.795068 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.795153 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.795515 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.797986 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.802525 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-b7z9l"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.802558 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.802570 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.802582 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.802591 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-v7787"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.802602 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-t9n45"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.803189 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-n85dl"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.803331 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-g62xj"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.803504 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-t9n45" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.805400 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.806839 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-vsxcm"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.807926 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-qfdzz"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.809099 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810094 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pbbt4"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810512 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qpb5\" (UniqueName: \"kubernetes.io/projected/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-kube-api-access-4qpb5\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810546 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7sxqg\" (UniqueName: \"kubernetes.io/projected/a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc-kube-api-access-7sxqg\") pod \"cluster-image-registry-operator-dc59b4c8b-b2rv8\" (UID: \"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810572 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-626mx\" (UniqueName: \"kubernetes.io/projected/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-kube-api-access-626mx\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810592 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-config\") pod \"route-controller-manager-6576b87f9c-vx7rs\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810610 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kvdn8\" (UniqueName: \"kubernetes.io/projected/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-kube-api-access-kvdn8\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810628 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-etcd-client\") pod \"etcd-operator-b45778765-b2scv\" (UID: 
\"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810648 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhwk4\" (UniqueName: \"kubernetes.io/projected/dbc51af2-c004-4115-a615-9d98b418c70f-kube-api-access-xhwk4\") pod \"openshift-config-operator-7777fb866f-n85dl\" (UID: \"dbc51af2-c004-4115-a615-9d98b418c70f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810670 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810688 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hlgw\" (UniqueName: \"kubernetes.io/projected/1ac82155-1d09-4371-869a-e7edb9c4d5bc-kube-api-access-7hlgw\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810706 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810726 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/19bfb345-68d7-418c-9695-2842e8b9f53a-srv-cert\") pod \"catalog-operator-68c6474976-472lc\" (UID: \"19bfb345-68d7-418c-9695-2842e8b9f53a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810747 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvp8m\" (UniqueName: \"kubernetes.io/projected/cb68a9be-651d-4412-a679-b8da3905f2dc-kube-api-access-nvp8m\") pod \"olm-operator-6b444d44fb-lslnr\" (UID: \"cb68a9be-651d-4412-a679-b8da3905f2dc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810767 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v4gq4\" (UniqueName: \"kubernetes.io/projected/bfa657b3-28e7-4503-a332-a2cd83725356-kube-api-access-v4gq4\") pod \"downloads-7954f5f757-4svb7\" (UID: \"bfa657b3-28e7-4503-a332-a2cd83725356\") " pod="openshift-console/downloads-7954f5f757-4svb7" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810790 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-signing-cabundle\") pod \"service-ca-9c57cc56f-rr2mf\" (UID: \"c9eb3f6d-70d5-4250-90e7-1046f1c1d370\") " 
pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810813 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/67ab6cf3-b8b2-4adb-a126-b0aa842964e8-proxy-tls\") pod \"machine-config-controller-84d6567774-r4jlz\" (UID: \"67ab6cf3-b8b2-4adb-a126-b0aa842964e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810844 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-74f6z\" (UniqueName: \"kubernetes.io/projected/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-kube-api-access-74f6z\") pod \"service-ca-9c57cc56f-rr2mf\" (UID: \"c9eb3f6d-70d5-4250-90e7-1046f1c1d370\") " pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810862 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-trusted-ca-bundle\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810883 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/302a671f-1c82-402b-9450-d27a1566dc3f-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgjcd\" (UID: \"302a671f-1c82-402b-9450-d27a1566dc3f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810905 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c4z68\" (UniqueName: \"kubernetes.io/projected/72d10cb6-6739-48fc-a732-8e8981bf80c9-kube-api-access-c4z68\") pod \"openshift-apiserver-operator-796bbdcf4f-lq7cv\" (UID: \"72d10cb6-6739-48fc-a732-8e8981bf80c9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810923 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810942 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810957 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-serving-cert\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 
18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.810978 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b2rv8\" (UID: \"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811001 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-proxy-tls\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811019 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65d6565f-d0b8-441e-8f17-293368650b57-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811036 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8g9jf\" (UniqueName: \"kubernetes.io/projected/19bfb345-68d7-418c-9695-2842e8b9f53a-kube-api-access-8g9jf\") pod \"catalog-operator-68c6474976-472lc\" (UID: \"19bfb345-68d7-418c-9695-2842e8b9f53a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811057 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-serving-cert\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811075 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pjn4q\" (UniqueName: \"kubernetes.io/projected/b8db25ab-2803-4342-a0a3-c1bd5a44ba2e-kube-api-access-pjn4q\") pod \"multus-admission-controller-857f4d67dd-q6cnp\" (UID: \"b8db25ab-2803-4342-a0a3-c1bd5a44ba2e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-q6cnp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811093 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g5ggr\" (UniqueName: \"kubernetes.io/projected/bef36296-0ea5-4316-a31a-c14346fc1597-kube-api-access-g5ggr\") pod \"control-plane-machine-set-operator-78cbb6b69f-ctkl7\" (UID: \"bef36296-0ea5-4316-a31a-c14346fc1597\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811114 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-etcd-ca\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 
18:31:26.811134 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811155 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811175 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cb68a9be-651d-4412-a679-b8da3905f2dc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-lslnr\" (UID: \"cb68a9be-651d-4412-a679-b8da3905f2dc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811191 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/2c588228-398e-4f20-bf14-03450184cc20-stats-auth\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811237 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-serving-cert\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811254 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-etcd-service-ca\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811271 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cscjz\" (UniqueName: \"kubernetes.io/projected/50052522-e0ba-4c6f-9b01-441578281afa-kube-api-access-cscjz\") pod \"service-ca-operator-777779d784-58t9w\" (UID: \"50052522-e0ba-4c6f-9b01-441578281afa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811291 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-client-ca\") pod \"route-controller-manager-6576b87f9c-vx7rs\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811312 4767 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-glvgt\" (UniqueName: \"kubernetes.io/projected/9414071e-c859-4b9b-a1b2-51026b6886d7-kube-api-access-glvgt\") pod \"console-operator-58897d9998-b7z9l\" (UID: \"9414071e-c859-4b9b-a1b2-51026b6886d7\") " pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811332 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e43f862d-aa02-45c1-ab71-9b8235aacd37-config\") pod \"kube-apiserver-operator-766d6c64bb-lpxgp\" (UID: \"e43f862d-aa02-45c1-ab71-9b8235aacd37\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811348 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-oauth-serving-cert\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811365 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a5c87c48-f06a-4f35-a336-2d74a88c40ac-audit-dir\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811384 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e10737ed-6d67-4dd1-9529-d46265d7cb29-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5vd7s\" (UID: \"e10737ed-6d67-4dd1-9529-d46265d7cb29\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811402 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811407 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811417 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e43f862d-aa02-45c1-ab71-9b8235aacd37-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-lpxgp\" (UID: \"e43f862d-aa02-45c1-ab71-9b8235aacd37\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.811466 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/389ef0a8-f13c-4eb7-8293-cecaff735697-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-4ntrw\" (UID: \"389ef0a8-f13c-4eb7-8293-cecaff735697\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.812152 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-config\") pod \"route-controller-manager-6576b87f9c-vx7rs\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.812199 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-q6cnp"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.812539 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-trusted-ca-bundle\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.812569 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.812684 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.812936 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-client-ca\") pod \"route-controller-manager-6576b87f9c-vx7rs\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813109 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9414071e-c859-4b9b-a1b2-51026b6886d7-trusted-ca\") pod \"console-operator-58897d9998-b7z9l\" (UID: \"9414071e-c859-4b9b-a1b2-51026b6886d7\") " pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813142 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-842fq\" (UniqueName: \"kubernetes.io/projected/2c588228-398e-4f20-bf14-03450184cc20-kube-api-access-842fq\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813166 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pm885\" (UniqueName: \"kubernetes.io/projected/302a671f-1c82-402b-9450-d27a1566dc3f-kube-api-access-pm885\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgjcd\" (UID: 
\"302a671f-1c82-402b-9450-d27a1566dc3f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813188 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72d10cb6-6739-48fc-a732-8e8981bf80c9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-lq7cv\" (UID: \"72d10cb6-6739-48fc-a732-8e8981bf80c9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813221 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zb6k2\" (UniqueName: \"kubernetes.io/projected/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-kube-api-access-zb6k2\") pod \"route-controller-manager-6576b87f9c-vx7rs\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813240 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/65d6565f-d0b8-441e-8f17-293368650b57-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813260 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-signing-key\") pod \"service-ca-9c57cc56f-rr2mf\" (UID: \"c9eb3f6d-70d5-4250-90e7-1046f1c1d370\") " pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813282 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9414071e-c859-4b9b-a1b2-51026b6886d7-serving-cert\") pod \"console-operator-58897d9998-b7z9l\" (UID: \"9414071e-c859-4b9b-a1b2-51026b6886d7\") " pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813301 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e43f862d-aa02-45c1-ab71-9b8235aacd37-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-lpxgp\" (UID: \"e43f862d-aa02-45c1-ab71-9b8235aacd37\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813219 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a5c87c48-f06a-4f35-a336-2d74a88c40ac-audit-dir\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813394 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/19bfb345-68d7-418c-9695-2842e8b9f53a-profile-collector-cert\") pod \"catalog-operator-68c6474976-472lc\" (UID: \"19bfb345-68d7-418c-9695-2842e8b9f53a\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813427 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-service-ca-bundle\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813447 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50052522-e0ba-4c6f-9b01-441578281afa-serving-cert\") pod \"service-ca-operator-777779d784-58t9w\" (UID: \"50052522-e0ba-4c6f-9b01-441578281afa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813484 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e10737ed-6d67-4dd1-9529-d46265d7cb29-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5vd7s\" (UID: \"e10737ed-6d67-4dd1-9529-d46265d7cb29\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813515 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.813534 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c588228-398e-4f20-bf14-03450184cc20-service-ca-bundle\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.814053 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.814272 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/65d6565f-d0b8-441e-8f17-293368650b57-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.814361 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/302a671f-1c82-402b-9450-d27a1566dc3f-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgjcd\" (UID: \"302a671f-1c82-402b-9450-d27a1566dc3f\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.814404 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-77vtc"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.814420 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-audit-policies\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.814467 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-images\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.814516 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-serving-cert\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.814571 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/fd30d9e8-8c0c-4db4-8923-7a7a03e1df31-metrics-tls\") pod \"dns-operator-744455d44c-kx6dk\" (UID: \"fd30d9e8-8c0c-4db4-8923-7a7a03e1df31\") " pod="openshift-dns-operator/dns-operator-744455d44c-kx6dk" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.814603 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-auth-proxy-config\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.814669 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/b8db25ab-2803-4342-a0a3-c1bd5a44ba2e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-q6cnp\" (UID: \"b8db25ab-2803-4342-a0a3-c1bd5a44ba2e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-q6cnp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.814803 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-oauth-serving-cert\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.814881 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-config\") pod \"controller-manager-879f6c89f-24jzn\" (UID: 
\"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.815605 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72d10cb6-6739-48fc-a732-8e8981bf80c9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-lq7cv\" (UID: \"72d10cb6-6739-48fc-a732-8e8981bf80c9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.815706 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-client-ca\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.815810 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-service-ca\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.815912 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-config\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.815652 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-4tbt4"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.816043 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-serving-cert\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.815455 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-service-ca-bundle\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.816021 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5cdebe9f-d5ed-4fd3-9648-6cea02821835-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-nrw74\" (UID: \"5cdebe9f-d5ed-4fd3-9648-6cea02821835\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.816311 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5cdebe9f-d5ed-4fd3-9648-6cea02821835-config\") pod \"kube-controller-manager-operator-78b949d7b-nrw74\" (UID: \"5cdebe9f-d5ed-4fd3-9648-6cea02821835\") " 
pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.816408 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbc51af2-c004-4115-a615-9d98b418c70f-serving-cert\") pod \"openshift-config-operator-7777fb866f-n85dl\" (UID: \"dbc51af2-c004-4115-a615-9d98b418c70f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.816504 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-config\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.816592 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/67ab6cf3-b8b2-4adb-a126-b0aa842964e8-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-r4jlz\" (UID: \"67ab6cf3-b8b2-4adb-a126-b0aa842964e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.816648 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-service-ca\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.816683 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjqdx\" (UniqueName: \"kubernetes.io/projected/fd30d9e8-8c0c-4db4-8923-7a7a03e1df31-kube-api-access-rjqdx\") pod \"dns-operator-744455d44c-kx6dk\" (UID: \"fd30d9e8-8c0c-4db4-8923-7a7a03e1df31\") " pod="openshift-dns-operator/dns-operator-744455d44c-kx6dk" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.816835 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dn4t2\" (UniqueName: \"kubernetes.io/projected/67ab6cf3-b8b2-4adb-a126-b0aa842964e8-kube-api-access-dn4t2\") pod \"machine-config-controller-84d6567774-r4jlz\" (UID: \"67ab6cf3-b8b2-4adb-a126-b0aa842964e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.816925 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/65d6565f-d0b8-441e-8f17-293368650b57-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.817015 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b2rv8\" (UID: \"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc\") " 
pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.816200 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/65d6565f-d0b8-441e-8f17-293368650b57-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.815538 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.817122 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-config\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.817127 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.817336 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/72d10cb6-6739-48fc-a732-8e8981bf80c9-config\") pod \"openshift-apiserver-operator-796bbdcf4f-lq7cv\" (UID: \"72d10cb6-6739-48fc-a732-8e8981bf80c9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.817397 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-4tbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.817562 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-km97p\" (UniqueName: \"kubernetes.io/projected/389ef0a8-f13c-4eb7-8293-cecaff735697-kube-api-access-km97p\") pod \"kube-storage-version-migrator-operator-b67b599dd-4ntrw\" (UID: \"389ef0a8-f13c-4eb7-8293-cecaff735697\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.817694 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/302a671f-1c82-402b-9450-d27a1566dc3f-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgjcd\" (UID: \"302a671f-1c82-402b-9450-d27a1566dc3f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.817816 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-config\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.817946 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ff24w\" (UniqueName: \"kubernetes.io/projected/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-kube-api-access-ff24w\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.818088 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cb68a9be-651d-4412-a679-b8da3905f2dc-srv-cert\") pod \"olm-operator-6b444d44fb-lslnr\" (UID: \"cb68a9be-651d-4412-a679-b8da3905f2dc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.817947 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.818270 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9414071e-c859-4b9b-a1b2-51026b6886d7-serving-cert\") pod \"console-operator-58897d9998-b7z9l\" (UID: \"9414071e-c859-4b9b-a1b2-51026b6886d7\") " pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.818245 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/389ef0a8-f13c-4eb7-8293-cecaff735697-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-4ntrw\" (UID: \"389ef0a8-f13c-4eb7-8293-cecaff735697\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.818521 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e10737ed-6d67-4dd1-9529-d46265d7cb29-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5vd7s\" (UID: \"e10737ed-6d67-4dd1-9529-d46265d7cb29\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.818643 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.818744 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-oauth-config\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.818850 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/bef36296-0ea5-4316-a31a-c14346fc1597-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-ctkl7\" (UID: \"bef36296-0ea5-4316-a31a-c14346fc1597\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.818922 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.818978 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.819117 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/65d6565f-d0b8-441e-8f17-293368650b57-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.819254 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-provider-selection\") pod 
\"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.819370 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/2c588228-398e-4f20-bf14-03450184cc20-default-certificate\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.819809 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5cdebe9f-d5ed-4fd3-9648-6cea02821835-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-nrw74\" (UID: \"5cdebe9f-d5ed-4fd3-9648-6cea02821835\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.819968 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4kth\" (UniqueName: \"kubernetes.io/projected/a5c87c48-f06a-4f35-a336-2d74a88c40ac-kube-api-access-z4kth\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.820098 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.820235 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50052522-e0ba-4c6f-9b01-441578281afa-config\") pod \"service-ca-operator-777779d784-58t9w\" (UID: \"50052522-e0ba-4c6f-9b01-441578281afa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.826380 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b2rv8\" (UID: \"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.827044 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/dbc51af2-c004-4115-a615-9d98b418c70f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-n85dl\" (UID: \"dbc51af2-c004-4115-a615-9d98b418c70f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.827283 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.827399 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/65d6565f-d0b8-441e-8f17-293368650b57-service-ca\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.827506 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-serving-cert\") pod \"route-controller-manager-6576b87f9c-vx7rs\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.827625 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2c588228-398e-4f20-bf14-03450184cc20-metrics-certs\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.827746 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9414071e-c859-4b9b-a1b2-51026b6886d7-config\") pod \"console-operator-58897d9998-b7z9l\" (UID: \"9414071e-c859-4b9b-a1b2-51026b6886d7\") " pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.821767 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9414071e-c859-4b9b-a1b2-51026b6886d7-trusted-ca\") pod \"console-operator-58897d9998-b7z9l\" (UID: \"9414071e-c859-4b9b-a1b2-51026b6886d7\") " pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.821962 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-oauth-config\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.826286 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-serving-cert\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.820343 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " 
pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.826599 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.827789 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-audit-policies\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.820492 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.821306 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.828953 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-b2scv"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.820399 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-b2rv8\" (UID: \"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.819262 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-config\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.821873 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 10:18:21.349880942 +0000 UTC Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.829546 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.827984 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.830023 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.819132 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: 
\"kubernetes.io/host-path/65d6565f-d0b8-441e-8f17-293368650b57-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.821257 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.830658 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.830808 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/302a671f-1c82-402b-9450-d27a1566dc3f-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgjcd\" (UID: \"302a671f-1c82-402b-9450-d27a1566dc3f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.831317 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-serving-cert\") pod \"route-controller-manager-6576b87f9c-vx7rs\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.831414 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-b2rv8\" (UID: \"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.831732 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9414071e-c859-4b9b-a1b2-51026b6886d7-config\") pod \"console-operator-58897d9998-b7z9l\" (UID: \"9414071e-c859-4b9b-a1b2-51026b6886d7\") " pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.831916 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-58t9w"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.832065 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/65d6565f-d0b8-441e-8f17-293368650b57-service-ca\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.832287 
4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-client-ca\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.832291 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.832774 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/72d10cb6-6739-48fc-a732-8e8981bf80c9-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-lq7cv\" (UID: \"72d10cb6-6739-48fc-a732-8e8981bf80c9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.833473 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.833534 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-config\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.833761 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-serving-cert\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.834252 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-4tbt4"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.835377 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-rr2mf"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.836400 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-rll48"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.837419 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gxsvp"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.838028 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.839066 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.840983 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-5h8dc"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.842473 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.843701 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf"] Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.858760 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.913505 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qwqg\" (UniqueName: \"kubernetes.io/projected/85c09fc3-36bb-43e9-a1a1-16eeab18b96f-kube-api-access-8qwqg\") pod \"machine-approver-56656f9798-cwpk4\" (UID: \"85c09fc3-36bb-43e9-a1a1-16eeab18b96f\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.924214 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929372 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e43f862d-aa02-45c1-ab71-9b8235aacd37-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-lpxgp\" (UID: \"e43f862d-aa02-45c1-ab71-9b8235aacd37\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929417 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/389ef0a8-f13c-4eb7-8293-cecaff735697-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-4ntrw\" (UID: \"389ef0a8-f13c-4eb7-8293-cecaff735697\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929445 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-842fq\" (UniqueName: \"kubernetes.io/projected/2c588228-398e-4f20-bf14-03450184cc20-kube-api-access-842fq\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929513 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-signing-key\") pod \"service-ca-9c57cc56f-rr2mf\" (UID: \"c9eb3f6d-70d5-4250-90e7-1046f1c1d370\") " pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929538 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e43f862d-aa02-45c1-ab71-9b8235aacd37-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-lpxgp\" (UID: \"e43f862d-aa02-45c1-ab71-9b8235aacd37\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929562 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/19bfb345-68d7-418c-9695-2842e8b9f53a-profile-collector-cert\") pod \"catalog-operator-68c6474976-472lc\" (UID: \"19bfb345-68d7-418c-9695-2842e8b9f53a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929594 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50052522-e0ba-4c6f-9b01-441578281afa-serving-cert\") pod \"service-ca-operator-777779d784-58t9w\" (UID: \"50052522-e0ba-4c6f-9b01-441578281afa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929617 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e10737ed-6d67-4dd1-9529-d46265d7cb29-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5vd7s\" (UID: \"e10737ed-6d67-4dd1-9529-d46265d7cb29\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929641 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c588228-398e-4f20-bf14-03450184cc20-service-ca-bundle\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929663 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-images\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929724 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/fd30d9e8-8c0c-4db4-8923-7a7a03e1df31-metrics-tls\") pod \"dns-operator-744455d44c-kx6dk\" (UID: \"fd30d9e8-8c0c-4db4-8923-7a7a03e1df31\") " pod="openshift-dns-operator/dns-operator-744455d44c-kx6dk" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929756 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-auth-proxy-config\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929787 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/b8db25ab-2803-4342-a0a3-c1bd5a44ba2e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-q6cnp\" (UID: \"b8db25ab-2803-4342-a0a3-c1bd5a44ba2e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-q6cnp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929811 4767 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-config\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929833 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5cdebe9f-d5ed-4fd3-9648-6cea02821835-config\") pod \"kube-controller-manager-operator-78b949d7b-nrw74\" (UID: \"5cdebe9f-d5ed-4fd3-9648-6cea02821835\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929855 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbc51af2-c004-4115-a615-9d98b418c70f-serving-cert\") pod \"openshift-config-operator-7777fb866f-n85dl\" (UID: \"dbc51af2-c004-4115-a615-9d98b418c70f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929876 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5cdebe9f-d5ed-4fd3-9648-6cea02821835-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-nrw74\" (UID: \"5cdebe9f-d5ed-4fd3-9648-6cea02821835\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929899 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/67ab6cf3-b8b2-4adb-a126-b0aa842964e8-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-r4jlz\" (UID: \"67ab6cf3-b8b2-4adb-a126-b0aa842964e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929920 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjqdx\" (UniqueName: \"kubernetes.io/projected/fd30d9e8-8c0c-4db4-8923-7a7a03e1df31-kube-api-access-rjqdx\") pod \"dns-operator-744455d44c-kx6dk\" (UID: \"fd30d9e8-8c0c-4db4-8923-7a7a03e1df31\") " pod="openshift-dns-operator/dns-operator-744455d44c-kx6dk" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929942 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dn4t2\" (UniqueName: \"kubernetes.io/projected/67ab6cf3-b8b2-4adb-a126-b0aa842964e8-kube-api-access-dn4t2\") pod \"machine-config-controller-84d6567774-r4jlz\" (UID: \"67ab6cf3-b8b2-4adb-a126-b0aa842964e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.929975 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-km97p\" (UniqueName: \"kubernetes.io/projected/389ef0a8-f13c-4eb7-8293-cecaff735697-kube-api-access-km97p\") pod \"kube-storage-version-migrator-operator-b67b599dd-4ntrw\" (UID: \"389ef0a8-f13c-4eb7-8293-cecaff735697\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930001 4767 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ff24w\" (UniqueName: \"kubernetes.io/projected/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-kube-api-access-ff24w\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930020 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cb68a9be-651d-4412-a679-b8da3905f2dc-srv-cert\") pod \"olm-operator-6b444d44fb-lslnr\" (UID: \"cb68a9be-651d-4412-a679-b8da3905f2dc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930044 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/389ef0a8-f13c-4eb7-8293-cecaff735697-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-4ntrw\" (UID: \"389ef0a8-f13c-4eb7-8293-cecaff735697\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930109 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e10737ed-6d67-4dd1-9529-d46265d7cb29-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5vd7s\" (UID: \"e10737ed-6d67-4dd1-9529-d46265d7cb29\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930134 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/bef36296-0ea5-4316-a31a-c14346fc1597-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-ctkl7\" (UID: \"bef36296-0ea5-4316-a31a-c14346fc1597\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930191 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/2c588228-398e-4f20-bf14-03450184cc20-default-certificate\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930229 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5cdebe9f-d5ed-4fd3-9648-6cea02821835-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-nrw74\" (UID: \"5cdebe9f-d5ed-4fd3-9648-6cea02821835\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930259 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50052522-e0ba-4c6f-9b01-441578281afa-config\") pod \"service-ca-operator-777779d784-58t9w\" (UID: \"50052522-e0ba-4c6f-9b01-441578281afa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930284 4767 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/dbc51af2-c004-4115-a615-9d98b418c70f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-n85dl\" (UID: \"dbc51af2-c004-4115-a615-9d98b418c70f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930314 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2c588228-398e-4f20-bf14-03450184cc20-metrics-certs\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930371 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-626mx\" (UniqueName: \"kubernetes.io/projected/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-kube-api-access-626mx\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930401 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-etcd-client\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930425 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhwk4\" (UniqueName: \"kubernetes.io/projected/dbc51af2-c004-4115-a615-9d98b418c70f-kube-api-access-xhwk4\") pod \"openshift-config-operator-7777fb866f-n85dl\" (UID: \"dbc51af2-c004-4115-a615-9d98b418c70f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930457 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/19bfb345-68d7-418c-9695-2842e8b9f53a-srv-cert\") pod \"catalog-operator-68c6474976-472lc\" (UID: \"19bfb345-68d7-418c-9695-2842e8b9f53a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930480 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v4gq4\" (UniqueName: \"kubernetes.io/projected/bfa657b3-28e7-4503-a332-a2cd83725356-kube-api-access-v4gq4\") pod \"downloads-7954f5f757-4svb7\" (UID: \"bfa657b3-28e7-4503-a332-a2cd83725356\") " pod="openshift-console/downloads-7954f5f757-4svb7" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930500 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-signing-cabundle\") pod \"service-ca-9c57cc56f-rr2mf\" (UID: \"c9eb3f6d-70d5-4250-90e7-1046f1c1d370\") " pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930521 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/67ab6cf3-b8b2-4adb-a126-b0aa842964e8-proxy-tls\") pod \"machine-config-controller-84d6567774-r4jlz\" 
(UID: \"67ab6cf3-b8b2-4adb-a126-b0aa842964e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930543 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvp8m\" (UniqueName: \"kubernetes.io/projected/cb68a9be-651d-4412-a679-b8da3905f2dc-kube-api-access-nvp8m\") pod \"olm-operator-6b444d44fb-lslnr\" (UID: \"cb68a9be-651d-4412-a679-b8da3905f2dc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930568 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-74f6z\" (UniqueName: \"kubernetes.io/projected/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-kube-api-access-74f6z\") pod \"service-ca-9c57cc56f-rr2mf\" (UID: \"c9eb3f6d-70d5-4250-90e7-1046f1c1d370\") " pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930606 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-proxy-tls\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930628 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8g9jf\" (UniqueName: \"kubernetes.io/projected/19bfb345-68d7-418c-9695-2842e8b9f53a-kube-api-access-8g9jf\") pod \"catalog-operator-68c6474976-472lc\" (UID: \"19bfb345-68d7-418c-9695-2842e8b9f53a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930652 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pjn4q\" (UniqueName: \"kubernetes.io/projected/b8db25ab-2803-4342-a0a3-c1bd5a44ba2e-kube-api-access-pjn4q\") pod \"multus-admission-controller-857f4d67dd-q6cnp\" (UID: \"b8db25ab-2803-4342-a0a3-c1bd5a44ba2e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-q6cnp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930675 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g5ggr\" (UniqueName: \"kubernetes.io/projected/bef36296-0ea5-4316-a31a-c14346fc1597-kube-api-access-g5ggr\") pod \"control-plane-machine-set-operator-78cbb6b69f-ctkl7\" (UID: \"bef36296-0ea5-4316-a31a-c14346fc1597\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930700 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-etcd-ca\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930724 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cb68a9be-651d-4412-a679-b8da3905f2dc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-lslnr\" (UID: \"cb68a9be-651d-4412-a679-b8da3905f2dc\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930747 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/2c588228-398e-4f20-bf14-03450184cc20-stats-auth\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930798 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-serving-cert\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930821 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-etcd-service-ca\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930843 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cscjz\" (UniqueName: \"kubernetes.io/projected/50052522-e0ba-4c6f-9b01-441578281afa-kube-api-access-cscjz\") pod \"service-ca-operator-777779d784-58t9w\" (UID: \"50052522-e0ba-4c6f-9b01-441578281afa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930878 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e43f862d-aa02-45c1-ab71-9b8235aacd37-config\") pod \"kube-apiserver-operator-766d6c64bb-lpxgp\" (UID: \"e43f862d-aa02-45c1-ab71-9b8235aacd37\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.930901 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e10737ed-6d67-4dd1-9529-d46265d7cb29-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5vd7s\" (UID: \"e10737ed-6d67-4dd1-9529-d46265d7cb29\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.932414 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e10737ed-6d67-4dd1-9529-d46265d7cb29-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5vd7s\" (UID: \"e10737ed-6d67-4dd1-9529-d46265d7cb29\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.932851 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/dbc51af2-c004-4115-a615-9d98b418c70f-available-featuregates\") pod \"openshift-config-operator-7777fb866f-n85dl\" (UID: \"dbc51af2-c004-4115-a615-9d98b418c70f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.933049 4767 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n4jjp\" (UniqueName: \"kubernetes.io/projected/c9ceeaa1-d900-4c38-89ef-31abebe17be9-kube-api-access-n4jjp\") pod \"apiserver-7bbb656c7d-svn7g\" (UID: \"c9ceeaa1-d900-4c38-89ef-31abebe17be9\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.933897 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-auth-proxy-config\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.934431 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.934601 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/67ab6cf3-b8b2-4adb-a126-b0aa842964e8-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-r4jlz\" (UID: \"67ab6cf3-b8b2-4adb-a126-b0aa842964e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.935266 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e10737ed-6d67-4dd1-9529-d46265d7cb29-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5vd7s\" (UID: \"e10737ed-6d67-4dd1-9529-d46265d7cb29\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.935980 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5cdebe9f-d5ed-4fd3-9648-6cea02821835-config\") pod \"kube-controller-manager-operator-78b949d7b-nrw74\" (UID: \"5cdebe9f-d5ed-4fd3-9648-6cea02821835\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.938247 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dbc51af2-c004-4115-a615-9d98b418c70f-serving-cert\") pod \"openshift-config-operator-7777fb866f-n85dl\" (UID: \"dbc51af2-c004-4115-a615-9d98b418c70f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.938887 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5cdebe9f-d5ed-4fd3-9648-6cea02821835-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-nrw74\" (UID: \"5cdebe9f-d5ed-4fd3-9648-6cea02821835\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.939021 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.958788 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 
28 18:31:26 crc kubenswrapper[4767]: I0128 18:31:26.979155 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:26.999933 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.026759 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.039005 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.060194 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.080437 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.098689 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.119572 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.138753 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.153048 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g"] Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.158982 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.167398 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/fd30d9e8-8c0c-4db4-8923-7a7a03e1df31-metrics-tls\") pod \"dns-operator-744455d44c-kx6dk\" (UID: \"fd30d9e8-8c0c-4db4-8923-7a7a03e1df31\") " pod="openshift-dns-operator/dns-operator-744455d44c-kx6dk" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.179059 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.221624 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.222810 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cdlw\" (UniqueName: \"kubernetes.io/projected/24a3b544-3082-45d9-956f-1540b9725ea2-kube-api-access-7cdlw\") pod \"machine-api-operator-5694c8668f-vqr6q\" (UID: \"24a3b544-3082-45d9-956f-1540b9725ea2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.223875 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-etcd-service-ca\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.240349 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.259138 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.267331 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-serving-cert\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.275240 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" event={"ID":"c9ceeaa1-d900-4c38-89ef-31abebe17be9","Type":"ContainerStarted","Data":"7cd3a59af36109e9554e1d0dd2a024224036e365bdd8ae111ff4dc09538cf09a"} Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.276532 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" event={"ID":"85c09fc3-36bb-43e9-a1a1-16eeab18b96f","Type":"ContainerStarted","Data":"5ec76dc5817172eb4d4e907723afb6b3f7848802a76d3787b72325a4deb8aa1f"} Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.279629 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.287152 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-etcd-client\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.299701 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.318218 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.338058 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.363999 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.374551 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-etcd-ca\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.396718 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zzsxt\" (UniqueName: \"kubernetes.io/projected/0c90af26-b1fa-4faf-bbba-903dc47f7a46-kube-api-access-zzsxt\") pod \"apiserver-76f77b778f-bmcrg\" (UID: \"0c90af26-b1fa-4faf-bbba-903dc47f7a46\") " 
pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.398709 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.407551 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2c588228-398e-4f20-bf14-03450184cc20-service-ca-bundle\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.418833 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.426424 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/2c588228-398e-4f20-bf14-03450184cc20-default-certificate\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.438972 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.445304 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/2c588228-398e-4f20-bf14-03450184cc20-stats-auth\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.452926 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-config\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.458617 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.466296 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/2c588228-398e-4f20-bf14-03450184cc20-metrics-certs\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.478642 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.479346 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.498973 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.519698 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.520429 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.539365 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.560147 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.579540 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.598572 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.619240 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.629562 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/67ab6cf3-b8b2-4adb-a126-b0aa842964e8-proxy-tls\") pod \"machine-config-controller-84d6567774-r4jlz\" (UID: \"67ab6cf3-b8b2-4adb-a126-b0aa842964e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.639309 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.659310 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.664727 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e43f862d-aa02-45c1-ab71-9b8235aacd37-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-lpxgp\" (UID: \"e43f862d-aa02-45c1-ab71-9b8235aacd37\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.673236 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-vqr6q"] Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.678991 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 28 18:31:27 crc kubenswrapper[4767]: W0128 18:31:27.681668 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24a3b544_3082_45d9_956f_1540b9725ea2.slice/crio-75c824d68446c2a30fcee17ed6bf44f5f22c4ee4e5220097f0b524f6881cf2a8 WatchSource:0}: Error finding container 75c824d68446c2a30fcee17ed6bf44f5f22c4ee4e5220097f0b524f6881cf2a8: Status 404 returned error can't find the container with id 75c824d68446c2a30fcee17ed6bf44f5f22c4ee4e5220097f0b524f6881cf2a8 Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.698409 4767 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.706229 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e43f862d-aa02-45c1-ab71-9b8235aacd37-config\") pod \"kube-apiserver-operator-766d6c64bb-lpxgp\" (UID: \"e43f862d-aa02-45c1-ab71-9b8235aacd37\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.708197 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-bmcrg"] Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.718967 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.737100 4767 request.go:700] Waited for 1.000758091s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-operator-lifecycle-manager/secrets?fieldSelector=metadata.name%3Dolm-operator-serving-cert&limit=500&resourceVersion=0 Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.738479 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.749883 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/cb68a9be-651d-4412-a679-b8da3905f2dc-srv-cert\") pod \"olm-operator-6b444d44fb-lslnr\" (UID: \"cb68a9be-651d-4412-a679-b8da3905f2dc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.759973 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.779138 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.786320 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/19bfb345-68d7-418c-9695-2842e8b9f53a-profile-collector-cert\") pod \"catalog-operator-68c6474976-472lc\" (UID: \"19bfb345-68d7-418c-9695-2842e8b9f53a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.786801 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/cb68a9be-651d-4412-a679-b8da3905f2dc-profile-collector-cert\") pod \"olm-operator-6b444d44fb-lslnr\" (UID: \"cb68a9be-651d-4412-a679-b8da3905f2dc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.798974 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.806586 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/389ef0a8-f13c-4eb7-8293-cecaff735697-serving-cert\") pod 
\"kube-storage-version-migrator-operator-b67b599dd-4ntrw\" (UID: \"389ef0a8-f13c-4eb7-8293-cecaff735697\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.818660 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.839103 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.858823 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.866803 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/b8db25ab-2803-4342-a0a3-c1bd5a44ba2e-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-q6cnp\" (UID: \"b8db25ab-2803-4342-a0a3-c1bd5a44ba2e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-q6cnp" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.878719 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.898847 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.918783 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.932141 4767 secret.go:188] Couldn't get secret openshift-service-ca-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.932234 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/50052522-e0ba-4c6f-9b01-441578281afa-serving-cert podName:50052522-e0ba-4c6f-9b01-441578281afa nodeName:}" failed. No retries permitted until 2026-01-28 18:31:28.432199458 +0000 UTC m=+94.396382332 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/50052522-e0ba-4c6f-9b01-441578281afa-serving-cert") pod "service-ca-operator-777779d784-58t9w" (UID: "50052522-e0ba-4c6f-9b01-441578281afa") : failed to sync secret cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.932294 4767 secret.go:188] Couldn't get secret openshift-service-ca/signing-key: failed to sync secret cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.932315 4767 configmap.go:193] Couldn't get configMap openshift-kube-storage-version-migrator-operator/config: failed to sync configmap cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.932390 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-signing-key podName:c9eb3f6d-70d5-4250-90e7-1046f1c1d370 nodeName:}" failed. No retries permitted until 2026-01-28 18:31:28.432365613 +0000 UTC m=+94.396548557 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "signing-key" (UniqueName: "kubernetes.io/secret/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-signing-key") pod "service-ca-9c57cc56f-rr2mf" (UID: "c9eb3f6d-70d5-4250-90e7-1046f1c1d370") : failed to sync secret cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.932434 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/389ef0a8-f13c-4eb7-8293-cecaff735697-config podName:389ef0a8-f13c-4eb7-8293-cecaff735697 nodeName:}" failed. No retries permitted until 2026-01-28 18:31:28.432403174 +0000 UTC m=+94.396586128 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/389ef0a8-f13c-4eb7-8293-cecaff735697-config") pod "kube-storage-version-migrator-operator-b67b599dd-4ntrw" (UID: "389ef0a8-f13c-4eb7-8293-cecaff735697") : failed to sync configmap cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.933341 4767 secret.go:188] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: failed to sync secret cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.933394 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/bef36296-0ea5-4316-a31a-c14346fc1597-control-plane-machine-set-operator-tls podName:bef36296-0ea5-4316-a31a-c14346fc1597 nodeName:}" failed. No retries permitted until 2026-01-28 18:31:28.433381426 +0000 UTC m=+94.397564300 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/bef36296-0ea5-4316-a31a-c14346fc1597-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-78cbb6b69f-ctkl7" (UID: "bef36296-0ea5-4316-a31a-c14346fc1597") : failed to sync secret cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.933396 4767 configmap.go:193] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: failed to sync configmap cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.933429 4767 secret.go:188] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: failed to sync secret cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.933444 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-images podName:30268a6d-2d26-4c9e-8692-65dd9e6d75e1 nodeName:}" failed. No retries permitted until 2026-01-28 18:31:28.433432458 +0000 UTC m=+94.397615402 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-images") pod "machine-config-operator-74547568cd-rll48" (UID: "30268a6d-2d26-4c9e-8692-65dd9e6d75e1") : failed to sync configmap cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.933461 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-proxy-tls podName:30268a6d-2d26-4c9e-8692-65dd9e6d75e1 nodeName:}" failed. No retries permitted until 2026-01-28 18:31:28.433451938 +0000 UTC m=+94.397634912 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-proxy-tls") pod "machine-config-operator-74547568cd-rll48" (UID: "30268a6d-2d26-4c9e-8692-65dd9e6d75e1") : failed to sync secret cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.933466 4767 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/catalog-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.933524 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/19bfb345-68d7-418c-9695-2842e8b9f53a-srv-cert podName:19bfb345-68d7-418c-9695-2842e8b9f53a nodeName:}" failed. No retries permitted until 2026-01-28 18:31:28.43351746 +0000 UTC m=+94.397700334 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "srv-cert" (UniqueName: "kubernetes.io/secret/19bfb345-68d7-418c-9695-2842e8b9f53a-srv-cert") pod "catalog-operator-68c6474976-472lc" (UID: "19bfb345-68d7-418c-9695-2842e8b9f53a") : failed to sync secret cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.933527 4767 configmap.go:193] Couldn't get configMap openshift-service-ca/signing-cabundle: failed to sync configmap cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.933585 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-signing-cabundle podName:c9eb3f6d-70d5-4250-90e7-1046f1c1d370 nodeName:}" failed. No retries permitted until 2026-01-28 18:31:28.433568952 +0000 UTC m=+94.397751906 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-signing-cabundle") pod "service-ca-9c57cc56f-rr2mf" (UID: "c9eb3f6d-70d5-4250-90e7-1046f1c1d370") : failed to sync configmap cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.934564 4767 configmap.go:193] Couldn't get configMap openshift-service-ca-operator/service-ca-operator-config: failed to sync configmap cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: E0128 18:31:27.934630 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/50052522-e0ba-4c6f-9b01-441578281afa-config podName:50052522-e0ba-4c6f-9b01-441578281afa nodeName:}" failed. No retries permitted until 2026-01-28 18:31:28.434621056 +0000 UTC m=+94.398803930 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/50052522-e0ba-4c6f-9b01-441578281afa-config") pod "service-ca-operator-777779d784-58t9w" (UID: "50052522-e0ba-4c6f-9b01-441578281afa") : failed to sync configmap cache: timed out waiting for the condition Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.938758 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.957655 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.978510 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 28 18:31:27 crc kubenswrapper[4767]: I0128 18:31:27.999061 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.018481 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.039273 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.058676 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.080853 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.099249 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.118673 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.138837 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.158092 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.179012 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.199665 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.219302 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.238336 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.260330 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 
28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.278620 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.283877 4767 generic.go:334] "Generic (PLEG): container finished" podID="c9ceeaa1-d900-4c38-89ef-31abebe17be9" containerID="432e376572befaa5dbf1c04e17966106387d765d25e309368e50405ab913d778" exitCode=0
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.284009 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" event={"ID":"c9ceeaa1-d900-4c38-89ef-31abebe17be9","Type":"ContainerDied","Data":"432e376572befaa5dbf1c04e17966106387d765d25e309368e50405ab913d778"}
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.287390 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" event={"ID":"0c90af26-b1fa-4faf-bbba-903dc47f7a46","Type":"ContainerStarted","Data":"48792515954a8c75bb3aa4145702596129cb2c97fcc821362d85040bab6902e5"}
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.288897 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" event={"ID":"24a3b544-3082-45d9-956f-1540b9725ea2","Type":"ContainerStarted","Data":"75c824d68446c2a30fcee17ed6bf44f5f22c4ee4e5220097f0b524f6881cf2a8"}
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.291818 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" event={"ID":"85c09fc3-36bb-43e9-a1a1-16eeab18b96f","Type":"ContainerStarted","Data":"4b0b28859e8ad6df5b1870038bb06ef63f3e6b13c01ec6057331329fb9e4077c"}
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.299984 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.318377 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.349034 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.359173 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.379830 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.398638 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.418657 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.459365 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.459571 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/bef36296-0ea5-4316-a31a-c14346fc1597-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-ctkl7\" (UID: \"bef36296-0ea5-4316-a31a-c14346fc1597\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.459893 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50052522-e0ba-4c6f-9b01-441578281afa-config\") pod \"service-ca-operator-777779d784-58t9w\" (UID: \"50052522-e0ba-4c6f-9b01-441578281afa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.459998 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/19bfb345-68d7-418c-9695-2842e8b9f53a-srv-cert\") pod \"catalog-operator-68c6474976-472lc\" (UID: \"19bfb345-68d7-418c-9695-2842e8b9f53a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.460042 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-signing-cabundle\") pod \"service-ca-9c57cc56f-rr2mf\" (UID: \"c9eb3f6d-70d5-4250-90e7-1046f1c1d370\") " pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.460094 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-proxy-tls\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.460173 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/389ef0a8-f13c-4eb7-8293-cecaff735697-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-4ntrw\" (UID: \"389ef0a8-f13c-4eb7-8293-cecaff735697\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.460259 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-signing-key\") pod \"service-ca-9c57cc56f-rr2mf\" (UID: \"c9eb3f6d-70d5-4250-90e7-1046f1c1d370\") " pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.460319 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50052522-e0ba-4c6f-9b01-441578281afa-serving-cert\") pod \"service-ca-operator-777779d784-58t9w\" (UID: \"50052522-e0ba-4c6f-9b01-441578281afa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.460877 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-images\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.461237 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/389ef0a8-f13c-4eb7-8293-cecaff735697-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-4ntrw\" (UID: \"389ef0a8-f13c-4eb7-8293-cecaff735697\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.461309 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-signing-cabundle\") pod \"service-ca-9c57cc56f-rr2mf\" (UID: \"c9eb3f6d-70d5-4250-90e7-1046f1c1d370\") " pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.461739 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-images\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.462130 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50052522-e0ba-4c6f-9b01-441578281afa-config\") pod \"service-ca-operator-777779d784-58t9w\" (UID: \"50052522-e0ba-4c6f-9b01-441578281afa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.463492 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-proxy-tls\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.463606 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/19bfb345-68d7-418c-9695-2842e8b9f53a-srv-cert\") pod \"catalog-operator-68c6474976-472lc\" (UID: \"19bfb345-68d7-418c-9695-2842e8b9f53a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.464074 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/bef36296-0ea5-4316-a31a-c14346fc1597-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-ctkl7\" (UID: \"bef36296-0ea5-4316-a31a-c14346fc1597\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.464252 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-signing-key\") pod \"service-ca-9c57cc56f-rr2mf\" (UID: \"c9eb3f6d-70d5-4250-90e7-1046f1c1d370\") " pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.465526 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/50052522-e0ba-4c6f-9b01-441578281afa-serving-cert\") pod \"service-ca-operator-777779d784-58t9w\" (UID: \"50052522-e0ba-4c6f-9b01-441578281afa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.478684 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.499170 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.519484 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.538004 4767 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.557875 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.578616 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.598449 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.618299 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.637744 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.658997 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.679299 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.699184 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.718817 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.737408 4767 request.go:700] Waited for 1.941457873s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-network-diagnostics/configmaps?fieldSelector=metadata.name%3Dopenshift-service-ca.crt&limit=500&resourceVersion=0
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.740357 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.758653 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.778639 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.798302 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.818128 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.838707 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.881344 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7sxqg\" (UniqueName: \"kubernetes.io/projected/a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc-kube-api-access-7sxqg\") pod \"cluster-image-registry-operator-dc59b4c8b-b2rv8\" (UID: \"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.894832 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kvdn8\" (UniqueName: \"kubernetes.io/projected/05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec-kube-api-access-kvdn8\") pod \"authentication-operator-69f744f599-vc67h\" (UID: \"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.912315 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qpb5\" (UniqueName: \"kubernetes.io/projected/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-kube-api-access-4qpb5\") pod \"controller-manager-879f6c89f-24jzn\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.933671 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hlgw\" (UniqueName: \"kubernetes.io/projected/1ac82155-1d09-4371-869a-e7edb9c4d5bc-kube-api-access-7hlgw\") pod \"console-f9d7485db-qfdzz\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " pod="openshift-console/console-f9d7485db-qfdzz"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.960520 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c4z68\" (UniqueName: \"kubernetes.io/projected/72d10cb6-6739-48fc-a732-8e8981bf80c9-kube-api-access-c4z68\") pod \"openshift-apiserver-operator-796bbdcf4f-lq7cv\" (UID: \"72d10cb6-6739-48fc-a732-8e8981bf80c9\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.973142 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glvgt\" (UniqueName: \"kubernetes.io/projected/9414071e-c859-4b9b-a1b2-51026b6886d7-kube-api-access-glvgt\") pod \"console-operator-58897d9998-b7z9l\" (UID: \"9414071e-c859-4b9b-a1b2-51026b6886d7\") " pod="openshift-console-operator/console-operator-58897d9998-b7z9l"
Jan 28 18:31:28 crc kubenswrapper[4767]: I0128 18:31:28.991086 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pm885\" (UniqueName: \"kubernetes.io/projected/302a671f-1c82-402b-9450-d27a1566dc3f-kube-api-access-pm885\") pod \"openshift-controller-manager-operator-756b6f6bc6-bgjcd\" (UID: \"302a671f-1c82-402b-9450-d27a1566dc3f\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.011149 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-b2rv8\" (UID: \"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.030845 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zb6k2\" (UniqueName: \"kubernetes.io/projected/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-kube-api-access-zb6k2\") pod \"route-controller-manager-6576b87f9c-vx7rs\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.052356 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/65d6565f-d0b8-441e-8f17-293368650b57-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-7jkqc\" (UID: \"65d6565f-d0b8-441e-8f17-293368650b57\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.058620 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.064916 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.077552 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.098017 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.107025 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.148903 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.148967 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4kth\" (UniqueName: \"kubernetes.io/projected/a5c87c48-f06a-4f35-a336-2d74a88c40ac-kube-api-access-z4kth\") pod \"oauth-openshift-558db77b4-pbbt4\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.165981 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.176153 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd"
Jan 28 18:31:29 crc kubenswrapper[4767]: W0128 18:31:29.177915 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod65d6565f_d0b8_441e_8f17_293368650b57.slice/crio-a2f5dcb96aa2f8e75cf82a906a42385e8c301253dc70d55e72447ab225da36b8 WatchSource:0}: Error finding container a2f5dcb96aa2f8e75cf82a906a42385e8c301253dc70d55e72447ab225da36b8: Status 404 returned error can't find the container with id a2f5dcb96aa2f8e75cf82a906a42385e8c301253dc70d55e72447ab225da36b8
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.185739 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.198761 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-b7z9l"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.201298 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.203403 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e10737ed-6d67-4dd1-9529-d46265d7cb29-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-5vd7s\" (UID: \"e10737ed-6d67-4dd1-9529-d46265d7cb29\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.208524 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-qfdzz"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.215059 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-842fq\" (UniqueName: \"kubernetes.io/projected/2c588228-398e-4f20-bf14-03450184cc20-kube-api-access-842fq\") pod \"router-default-5444994796-ttsxj\" (UID: \"2c588228-398e-4f20-bf14-03450184cc20\") " pod="openshift-ingress/router-default-5444994796-ttsxj"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.236839 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-24jzn"]
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.237337 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e43f862d-aa02-45c1-ab71-9b8235aacd37-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-lpxgp\" (UID: \"e43f862d-aa02-45c1-ab71-9b8235aacd37\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.259369 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-626mx\" (UniqueName: \"kubernetes.io/projected/c5e837ca-e053-4dc1-9e57-ddc248ba1cd3-kube-api-access-626mx\") pod \"etcd-operator-b45778765-b2scv\" (UID: \"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3\") " pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.270628 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.277020 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhwk4\" (UniqueName: \"kubernetes.io/projected/dbc51af2-c004-4115-a615-9d98b418c70f-kube-api-access-xhwk4\") pod \"openshift-config-operator-7777fb866f-n85dl\" (UID: \"dbc51af2-c004-4115-a615-9d98b418c70f\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl"
Jan 28 18:31:29 crc kubenswrapper[4767]: W0128 18:31:29.280411 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcfddbedb_c663_4cb3_ae11_1e0dc851cb2a.slice/crio-afaa4b90e785a316fe01ea76ac719e9320a6ce19e9a0dc50d4f8fc9b083d7f68 WatchSource:0}: Error finding container afaa4b90e785a316fe01ea76ac719e9320a6ce19e9a0dc50d4f8fc9b083d7f68: Status 404 returned error can't find the container with id afaa4b90e785a316fe01ea76ac719e9320a6ce19e9a0dc50d4f8fc9b083d7f68
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.296397 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v4gq4\" (UniqueName: \"kubernetes.io/projected/bfa657b3-28e7-4503-a332-a2cd83725356-kube-api-access-v4gq4\") pod \"downloads-7954f5f757-4svb7\" (UID: \"bfa657b3-28e7-4503-a332-a2cd83725356\") " pod="openshift-console/downloads-7954f5f757-4svb7"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.304761 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.309694 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-vc67h"]
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.311886 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" event={"ID":"65d6565f-d0b8-441e-8f17-293368650b57","Type":"ContainerStarted","Data":"a2f5dcb96aa2f8e75cf82a906a42385e8c301253dc70d55e72447ab225da36b8"}
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.317812 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvp8m\" (UniqueName: \"kubernetes.io/projected/cb68a9be-651d-4412-a679-b8da3905f2dc-kube-api-access-nvp8m\") pod \"olm-operator-6b444d44fb-lslnr\" (UID: \"cb68a9be-651d-4412-a679-b8da3905f2dc\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.318226 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-ttsxj"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.320452 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" event={"ID":"85c09fc3-36bb-43e9-a1a1-16eeab18b96f","Type":"ContainerStarted","Data":"abb91d1c2464973c0ae99bd0bfdbe379f32a079071cb8b07f63c69352cb0054c"}
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.336950 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.342867 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-74f6z\" (UniqueName: \"kubernetes.io/projected/c9eb3f6d-70d5-4250-90e7-1046f1c1d370-kube-api-access-74f6z\") pod \"service-ca-9c57cc56f-rr2mf\" (UID: \"c9eb3f6d-70d5-4250-90e7-1046f1c1d370\") " pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.354729 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" event={"ID":"c9ceeaa1-d900-4c38-89ef-31abebe17be9","Type":"ContainerStarted","Data":"51c8b9e4792069fef21fab197f07d3f479a706c82d8ec1fb671c6eb94704aa3c"}
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.358083 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.358231 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pjn4q\" (UniqueName: \"kubernetes.io/projected/b8db25ab-2803-4342-a0a3-c1bd5a44ba2e-kube-api-access-pjn4q\") pod \"multus-admission-controller-857f4d67dd-q6cnp\" (UID: \"b8db25ab-2803-4342-a0a3-c1bd5a44ba2e\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-q6cnp"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.361046 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" event={"ID":"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a","Type":"ContainerStarted","Data":"afaa4b90e785a316fe01ea76ac719e9320a6ce19e9a0dc50d4f8fc9b083d7f68"}
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.365229 4767 generic.go:334] "Generic (PLEG): container finished" podID="0c90af26-b1fa-4faf-bbba-903dc47f7a46" containerID="2741865883b289a2cdc34f18f0923463b6612b969b2cb52418c8c3ddc87e94b2" exitCode=0
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.365317 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" event={"ID":"0c90af26-b1fa-4faf-bbba-903dc47f7a46","Type":"ContainerDied","Data":"2741865883b289a2cdc34f18f0923463b6612b969b2cb52418c8c3ddc87e94b2"}
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.370121 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" event={"ID":"24a3b544-3082-45d9-956f-1540b9725ea2","Type":"ContainerStarted","Data":"6021c95366f6b5008fc2866e0f77ab8b7263977abea7336c2858940ac4b99531"}
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.370193 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" event={"ID":"24a3b544-3082-45d9-956f-1540b9725ea2","Type":"ContainerStarted","Data":"bdb45b938185cbf80590f084664ca65037c2239b23baf6aa0edcd3a72b10923e"}
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.382524 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-q6cnp"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.385340 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cscjz\" (UniqueName: \"kubernetes.io/projected/50052522-e0ba-4c6f-9b01-441578281afa-kube-api-access-cscjz\") pod \"service-ca-operator-777779d784-58t9w\" (UID: \"50052522-e0ba-4c6f-9b01-441578281afa\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.398412 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8g9jf\" (UniqueName: \"kubernetes.io/projected/19bfb345-68d7-418c-9695-2842e8b9f53a-kube-api-access-8g9jf\") pod \"catalog-operator-68c6474976-472lc\" (UID: \"19bfb345-68d7-418c-9695-2842e8b9f53a\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.409838 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.411745 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs"]
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.419615 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-km97p\" (UniqueName: \"kubernetes.io/projected/389ef0a8-f13c-4eb7-8293-cecaff735697-kube-api-access-km97p\") pod \"kube-storage-version-migrator-operator-b67b599dd-4ntrw\" (UID: \"389ef0a8-f13c-4eb7-8293-cecaff735697\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw"
Jan 28 18:31:29 crc kubenswrapper[4767]: W0128 18:31:29.430123 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2c588228_398e_4f20_bf14_03450184cc20.slice/crio-b19d2ccf5a58f1aecdff6a9e5d09817918e4bd5f12ade1fb7c5d2c9785a0bc4d WatchSource:0}: Error finding container b19d2ccf5a58f1aecdff6a9e5d09817918e4bd5f12ade1fb7c5d2c9785a0bc4d: Status 404 returned error can't find the container with id b19d2ccf5a58f1aecdff6a9e5d09817918e4bd5f12ade1fb7c5d2c9785a0bc4d
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.434338 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g5ggr\" (UniqueName: \"kubernetes.io/projected/bef36296-0ea5-4316-a31a-c14346fc1597-kube-api-access-g5ggr\") pod \"control-plane-machine-set-operator-78cbb6b69f-ctkl7\" (UID: \"bef36296-0ea5-4316-a31a-c14346fc1597\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.436338 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.441631 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.461237 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ff24w\" (UniqueName: \"kubernetes.io/projected/30268a6d-2d26-4c9e-8692-65dd9e6d75e1-kube-api-access-ff24w\") pod \"machine-config-operator-74547568cd-rll48\" (UID: \"30268a6d-2d26-4c9e-8692-65dd9e6d75e1\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.477491 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/5cdebe9f-d5ed-4fd3-9648-6cea02821835-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-nrw74\" (UID: \"5cdebe9f-d5ed-4fd3-9648-6cea02821835\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.485234 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8"]
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.489894 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd"]
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.502719 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjqdx\" (UniqueName: \"kubernetes.io/projected/fd30d9e8-8c0c-4db4-8923-7a7a03e1df31-kube-api-access-rjqdx\") pod \"dns-operator-744455d44c-kx6dk\" (UID: \"fd30d9e8-8c0c-4db4-8923-7a7a03e1df31\") " pod="openshift-dns-operator/dns-operator-744455d44c-kx6dk"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.518413 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dn4t2\" (UniqueName: \"kubernetes.io/projected/67ab6cf3-b8b2-4adb-a126-b0aa842964e8-kube-api-access-dn4t2\") pod \"machine-config-controller-84d6567774-r4jlz\" (UID: \"67ab6cf3-b8b2-4adb-a126-b0aa842964e8\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.602766 4767 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.609251 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-4svb7"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.609686 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-kx6dk"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.609832 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b803ae3e-471d-4c86-a375-c4e7ab4403cd-installation-pull-secrets\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.609882 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b803ae3e-471d-4c86-a375-c4e7ab4403cd-registry-certificates\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.609924 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-registry-tls\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.609937 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.610020 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.610301 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b803ae3e-471d-4c86-a375-c4e7ab4403cd-ca-trust-extracted\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.610545 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b803ae3e-471d-4c86-a375-c4e7ab4403cd-trusted-ca\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.610591 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-bound-sa-token\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.611320 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vxfp\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-kube-api-access-4vxfp\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.612499 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.613090 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s"]
Jan 28 18:31:29 crc kubenswrapper[4767]: E0128 18:31:29.614986 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.114964075 +0000 UTC m=+96.079147019 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.648413 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-b2scv"]
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.651619 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.676170 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.690025 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.699488 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.716107 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:29 crc kubenswrapper[4767]: W0128 18:31:29.716219 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode10737ed_6d67_4dd1_9529_d46265d7cb29.slice/crio-6b74335600969d801b2f26d9ade9d1ede70df225e142bafa5d675e4f9aa08a37 WatchSource:0}: Error finding container 6b74335600969d801b2f26d9ade9d1ede70df225e142bafa5d675e4f9aa08a37: Status 404 returned error can't find the container with id 6b74335600969d801b2f26d9ade9d1ede70df225e142bafa5d675e4f9aa08a37
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.716352 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vp6cd\" (UniqueName: \"kubernetes.io/projected/cd5f7195-94fd-45d5-bf00-ceecdcb22ea0-kube-api-access-vp6cd\") pod \"ingress-operator-5b745b69d9-v7787\" (UID: \"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.716396 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b803ae3e-471d-4c86-a375-c4e7ab4403cd-ca-trust-extracted\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.716420 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b803ae3e-471d-4c86-a375-c4e7ab4403cd-trusted-ca\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.716447 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-bound-sa-token\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.716464 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vxfp\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-kube-api-access-4vxfp\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.716482 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-csi-data-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.716501 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cd5f7195-94fd-45d5-bf00-ceecdcb22ea0-trusted-ca\") pod \"ingress-operator-5b745b69d9-v7787\" (UID: \"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.716515 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b57f864b-8ab2-499a-a47e-b4a4c62842e7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-gxsvp\" (UID: \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.716530 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b57f864b-8ab2-499a-a47e-b4a4c62842e7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-gxsvp\" (UID: \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.716565 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ss686\" (UniqueName: \"kubernetes.io/projected/b57f864b-8ab2-499a-a47e-b4a4c62842e7-kube-api-access-ss686\") pod \"marketplace-operator-79b997595-gxsvp\" (UID: \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.716595 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cd5f7195-94fd-45d5-bf00-ceecdcb22ea0-bound-sa-token\") pod \"ingress-operator-5b745b69d9-v7787\" (UID: \"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787"
Jan 28 18:31:29 crc kubenswrapper[4767]: E0128 18:31:29.717540 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.217521766 +0000 UTC m=+96.181704640 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.719322 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-plugins-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.719432 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.719465 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/583ee7da-190f-4eae-813d-e195a64c236f-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8st5n\" (UID: \"583ee7da-190f-4eae-813d-e195a64c236f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.719514 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-mountpoint-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.719537 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssgcf\" (UniqueName: \"kubernetes.io/projected/921a23b8-84dd-478c-b1f8-3e9f812d79cb-kube-api-access-ssgcf\") pod \"migrator-59844c95c7-vsxcm\" (UID: \"921a23b8-84dd-478c-b1f8-3e9f812d79cb\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vsxcm"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.719550 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-registration-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.719678 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swrx9\" (UniqueName: \"kubernetes.io/projected/583ee7da-190f-4eae-813d-e195a64c236f-kube-api-access-swrx9\") pod \"cluster-samples-operator-665b6dd947-8st5n\" (UID: \"583ee7da-190f-4eae-813d-e195a64c236f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.719721 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqr5s\" (UniqueName: \"kubernetes.io/projected/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-kube-api-access-gqr5s\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.719795 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cd5f7195-94fd-45d5-bf00-ceecdcb22ea0-metrics-tls\") pod \"ingress-operator-5b745b69d9-v7787\" (UID: \"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787"
Jan 28 18:31:29 crc kubenswrapper[4767]: E0128 18:31:29.722083 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.222069821 +0000 UTC m=+96.186252695 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.726445 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b803ae3e-471d-4c86-a375-c4e7ab4403cd-installation-pull-secrets\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.726774 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b803ae3e-471d-4c86-a375-c4e7ab4403cd-ca-trust-extracted\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.728127 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.728429 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b803ae3e-471d-4c86-a375-c4e7ab4403cd-trusted-ca\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.728786 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b803ae3e-471d-4c86-a375-c4e7ab4403cd-registry-certificates\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.729102 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-socket-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.729215 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-registry-tls\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.729850 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b803ae3e-471d-4c86-a375-c4e7ab4403cd-registry-certificates\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.733969 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b803ae3e-471d-4c86-a375-c4e7ab4403cd-installation-pull-secrets\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.734045 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-registry-tls\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.760351 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-bound-sa-token\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.765131 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vxfp\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-kube-api-access-4vxfp\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.776694 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv"]
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.778665 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-b7z9l"]
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.779843 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-qfdzz"]
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.829882 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830114 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/604a371d-6aff-4eb9-85b7-3f426d1bdc5c-cert\") pod \"ingress-canary-g62xj\" (UID: \"604a371d-6aff-4eb9-85b7-3f426d1bdc5c\") " pod="openshift-ingress-canary/ingress-canary-g62xj"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830172 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-socket-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830199 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vp6cd\" (UniqueName: \"kubernetes.io/projected/cd5f7195-94fd-45d5-bf00-ceecdcb22ea0-kube-api-access-vp6cd\") pod \"ingress-operator-5b745b69d9-v7787\" (UID: \"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830298 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/121787da-7b66-4cec-af46-774c1d87cc40-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9n4rf\" (UID: \"121787da-7b66-4cec-af46-774c1d87cc40\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830343 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/595c726d-77f3-427f-a098-2301abc05d3b-metrics-tls\") pod \"dns-default-4tbt4\" (UID: \"595c726d-77f3-427f-a098-2301abc05d3b\") " pod="openshift-dns/dns-default-4tbt4"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830414 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-csi-data-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830433 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8d95\" (UniqueName: \"kubernetes.io/projected/14f31472-a674-46f3-a8db-faf5eeb91cb9-kube-api-access-g8d95\") pod \"packageserver-d55dfcdfc-pmb2t\" (UID: \"14f31472-a674-46f3-a8db-faf5eeb91cb9\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830477 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/88b3dd6f-8bee-4f26-92b4-b9c79f847a6f-certs\") pod \"machine-config-server-t9n45\" (UID: \"88b3dd6f-8bee-4f26-92b4-b9c79f847a6f\") " pod="openshift-machine-config-operator/machine-config-server-t9n45"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830506 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cd5f7195-94fd-45d5-bf00-ceecdcb22ea0-trusted-ca\") pod \"ingress-operator-5b745b69d9-v7787\" (UID: \"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787"
Jan 28 18:31:29 crc kubenswrapper[4767]: E0128 18:31:29.830536 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.33051672 +0000 UTC m=+96.294699594 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830571 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b57f864b-8ab2-499a-a47e-b4a4c62842e7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-gxsvp\" (UID: \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830597 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b57f864b-8ab2-499a-a47e-b4a4c62842e7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-gxsvp\" (UID: \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830627 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/14f31472-a674-46f3-a8db-faf5eeb91cb9-tmpfs\") pod \"packageserver-d55dfcdfc-pmb2t\" (UID: \"14f31472-a674-46f3-a8db-faf5eeb91cb9\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830690 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/88b3dd6f-8bee-4f26-92b4-b9c79f847a6f-node-bootstrap-token\") pod \"machine-config-server-t9n45\" (UID: \"88b3dd6f-8bee-4f26-92b4-b9c79f847a6f\") " pod="openshift-machine-config-operator/machine-config-server-t9n45"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830833 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ss686\" (UniqueName: \"kubernetes.io/projected/b57f864b-8ab2-499a-a47e-b4a4c62842e7-kube-api-access-ss686\") pod \"marketplace-operator-79b997595-gxsvp\" (UID: \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830900 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/595c726d-77f3-427f-a098-2301abc05d3b-config-volume\") pod \"dns-default-4tbt4\" (UID: \"595c726d-77f3-427f-a098-2301abc05d3b\") " pod="openshift-dns/dns-default-4tbt4"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.830954 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cd5f7195-94fd-45d5-bf00-ceecdcb22ea0-bound-sa-token\") pod \"ingress-operator-5b745b69d9-v7787\" (UID: \"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831002 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/14f31472-a674-46f3-a8db-faf5eeb91cb9-webhook-cert\") pod \"packageserver-d55dfcdfc-pmb2t\" (UID: \"14f31472-a674-46f3-a8db-faf5eeb91cb9\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831019 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzl57\" (UniqueName: \"kubernetes.io/projected/bcde5fe1-42de-449a-9743-2f313a33659a-kube-api-access-dzl57\") pod \"collect-profiles-29493750-rcm4m\" (UID: \"bcde5fe1-42de-449a-9743-2f313a33659a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831053 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-plugins-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831070 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpwr4\" (UniqueName: \"kubernetes.io/projected/88b3dd6f-8bee-4f26-92b4-b9c79f847a6f-kube-api-access-rpwr4\") pod \"machine-config-server-t9n45\" (UID: \"88b3dd6f-8bee-4f26-92b4-b9c79f847a6f\") " pod="openshift-machine-config-operator/machine-config-server-t9n45"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831084 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bcde5fe1-42de-449a-9743-2f313a33659a-config-volume\") pod \"collect-profiles-29493750-rcm4m\" (UID: \"bcde5fe1-42de-449a-9743-2f313a33659a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831109 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831126 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-586q4\" (UniqueName: \"kubernetes.io/projected/604a371d-6aff-4eb9-85b7-3f426d1bdc5c-kube-api-access-586q4\") pod \"ingress-canary-g62xj\" (UID: \"604a371d-6aff-4eb9-85b7-3f426d1bdc5c\") " pod="openshift-ingress-canary/ingress-canary-g62xj"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831170 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/583ee7da-190f-4eae-813d-e195a64c236f-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8st5n\" (UID: \"583ee7da-190f-4eae-813d-e195a64c236f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831200 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-mountpoint-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831246 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssgcf\" (UniqueName: \"kubernetes.io/projected/921a23b8-84dd-478c-b1f8-3e9f812d79cb-kube-api-access-ssgcf\") pod \"migrator-59844c95c7-vsxcm\" (UID: \"921a23b8-84dd-478c-b1f8-3e9f812d79cb\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vsxcm"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831263 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-registration-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831365 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swrx9\" (UniqueName: \"kubernetes.io/projected/583ee7da-190f-4eae-813d-e195a64c236f-kube-api-access-swrx9\") pod \"cluster-samples-operator-665b6dd947-8st5n\" (UID: \"583ee7da-190f-4eae-813d-e195a64c236f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n"
Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831383 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r68xg\" (UniqueName: \"kubernetes.io/projected/121787da-7b66-4cec-af46-774c1d87cc40-kube-api-access-r68xg\") pod
\"package-server-manager-789f6589d5-9n4rf\" (UID: \"121787da-7b66-4cec-af46-774c1d87cc40\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831410 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2xzq\" (UniqueName: \"kubernetes.io/projected/595c726d-77f3-427f-a098-2301abc05d3b-kube-api-access-r2xzq\") pod \"dns-default-4tbt4\" (UID: \"595c726d-77f3-427f-a098-2301abc05d3b\") " pod="openshift-dns/dns-default-4tbt4" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831429 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqr5s\" (UniqueName: \"kubernetes.io/projected/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-kube-api-access-gqr5s\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831449 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bcde5fe1-42de-449a-9743-2f313a33659a-secret-volume\") pod \"collect-profiles-29493750-rcm4m\" (UID: \"bcde5fe1-42de-449a-9743-2f313a33659a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831487 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cd5f7195-94fd-45d5-bf00-ceecdcb22ea0-metrics-tls\") pod \"ingress-operator-5b745b69d9-v7787\" (UID: \"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.831501 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/14f31472-a674-46f3-a8db-faf5eeb91cb9-apiservice-cert\") pod \"packageserver-d55dfcdfc-pmb2t\" (UID: \"14f31472-a674-46f3-a8db-faf5eeb91cb9\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.832590 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cd5f7195-94fd-45d5-bf00-ceecdcb22ea0-trusted-ca\") pod \"ingress-operator-5b745b69d9-v7787\" (UID: \"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.834371 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-socket-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.834515 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-csi-data-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.834626 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-registration-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.836677 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-mountpoint-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" Jan 28 18:31:29 crc kubenswrapper[4767]: E0128 18:31:29.836809 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.33677725 +0000 UTC m=+96.300960314 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.838390 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b57f864b-8ab2-499a-a47e-b4a4c62842e7-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-gxsvp\" (UID: \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.839167 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-plugins-dir\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.845908 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/cd5f7195-94fd-45d5-bf00-ceecdcb22ea0-metrics-tls\") pod \"ingress-operator-5b745b69d9-v7787\" (UID: \"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.852248 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b57f864b-8ab2-499a-a47e-b4a4c62842e7-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-gxsvp\" (UID: \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.856260 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/583ee7da-190f-4eae-813d-e195a64c236f-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-8st5n\" (UID: \"583ee7da-190f-4eae-813d-e195a64c236f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n" Jan 28 18:31:29 crc 
kubenswrapper[4767]: I0128 18:31:29.857584 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vp6cd\" (UniqueName: \"kubernetes.io/projected/cd5f7195-94fd-45d5-bf00-ceecdcb22ea0-kube-api-access-vp6cd\") pod \"ingress-operator-5b745b69d9-v7787\" (UID: \"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.899711 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssgcf\" (UniqueName: \"kubernetes.io/projected/921a23b8-84dd-478c-b1f8-3e9f812d79cb-kube-api-access-ssgcf\") pod \"migrator-59844c95c7-vsxcm\" (UID: \"921a23b8-84dd-478c-b1f8-3e9f812d79cb\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vsxcm" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.902665 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp"] Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.912597 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqr5s\" (UniqueName: \"kubernetes.io/projected/ecf41ec8-6c32-4b5c-a157-f52cc4de995a-kube-api-access-gqr5s\") pod \"csi-hostpathplugin-5h8dc\" (UID: \"ecf41ec8-6c32-4b5c-a157-f52cc4de995a\") " pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.932728 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.932853 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/121787da-7b66-4cec-af46-774c1d87cc40-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9n4rf\" (UID: \"121787da-7b66-4cec-af46-774c1d87cc40\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.932891 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/595c726d-77f3-427f-a098-2301abc05d3b-metrics-tls\") pod \"dns-default-4tbt4\" (UID: \"595c726d-77f3-427f-a098-2301abc05d3b\") " pod="openshift-dns/dns-default-4tbt4" Jan 28 18:31:29 crc kubenswrapper[4767]: E0128 18:31:29.932931 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.432908896 +0000 UTC m=+96.397091790 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933007 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8d95\" (UniqueName: \"kubernetes.io/projected/14f31472-a674-46f3-a8db-faf5eeb91cb9-kube-api-access-g8d95\") pod \"packageserver-d55dfcdfc-pmb2t\" (UID: \"14f31472-a674-46f3-a8db-faf5eeb91cb9\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933050 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/88b3dd6f-8bee-4f26-92b4-b9c79f847a6f-certs\") pod \"machine-config-server-t9n45\" (UID: \"88b3dd6f-8bee-4f26-92b4-b9c79f847a6f\") " pod="openshift-machine-config-operator/machine-config-server-t9n45" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933078 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/14f31472-a674-46f3-a8db-faf5eeb91cb9-tmpfs\") pod \"packageserver-d55dfcdfc-pmb2t\" (UID: \"14f31472-a674-46f3-a8db-faf5eeb91cb9\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933104 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/88b3dd6f-8bee-4f26-92b4-b9c79f847a6f-node-bootstrap-token\") pod \"machine-config-server-t9n45\" (UID: \"88b3dd6f-8bee-4f26-92b4-b9c79f847a6f\") " pod="openshift-machine-config-operator/machine-config-server-t9n45" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933150 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/595c726d-77f3-427f-a098-2301abc05d3b-config-volume\") pod \"dns-default-4tbt4\" (UID: \"595c726d-77f3-427f-a098-2301abc05d3b\") " pod="openshift-dns/dns-default-4tbt4" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933187 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/14f31472-a674-46f3-a8db-faf5eeb91cb9-webhook-cert\") pod \"packageserver-d55dfcdfc-pmb2t\" (UID: \"14f31472-a674-46f3-a8db-faf5eeb91cb9\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933265 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzl57\" (UniqueName: \"kubernetes.io/projected/bcde5fe1-42de-449a-9743-2f313a33659a-kube-api-access-dzl57\") pod \"collect-profiles-29493750-rcm4m\" (UID: \"bcde5fe1-42de-449a-9743-2f313a33659a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933304 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpwr4\" (UniqueName: 
\"kubernetes.io/projected/88b3dd6f-8bee-4f26-92b4-b9c79f847a6f-kube-api-access-rpwr4\") pod \"machine-config-server-t9n45\" (UID: \"88b3dd6f-8bee-4f26-92b4-b9c79f847a6f\") " pod="openshift-machine-config-operator/machine-config-server-t9n45" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933332 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bcde5fe1-42de-449a-9743-2f313a33659a-config-volume\") pod \"collect-profiles-29493750-rcm4m\" (UID: \"bcde5fe1-42de-449a-9743-2f313a33659a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933361 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933384 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-586q4\" (UniqueName: \"kubernetes.io/projected/604a371d-6aff-4eb9-85b7-3f426d1bdc5c-kube-api-access-586q4\") pod \"ingress-canary-g62xj\" (UID: \"604a371d-6aff-4eb9-85b7-3f426d1bdc5c\") " pod="openshift-ingress-canary/ingress-canary-g62xj" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933457 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r68xg\" (UniqueName: \"kubernetes.io/projected/121787da-7b66-4cec-af46-774c1d87cc40-kube-api-access-r68xg\") pod \"package-server-manager-789f6589d5-9n4rf\" (UID: \"121787da-7b66-4cec-af46-774c1d87cc40\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933485 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2xzq\" (UniqueName: \"kubernetes.io/projected/595c726d-77f3-427f-a098-2301abc05d3b-kube-api-access-r2xzq\") pod \"dns-default-4tbt4\" (UID: \"595c726d-77f3-427f-a098-2301abc05d3b\") " pod="openshift-dns/dns-default-4tbt4" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933511 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bcde5fe1-42de-449a-9743-2f313a33659a-secret-volume\") pod \"collect-profiles-29493750-rcm4m\" (UID: \"bcde5fe1-42de-449a-9743-2f313a33659a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933556 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/14f31472-a674-46f3-a8db-faf5eeb91cb9-apiservice-cert\") pod \"packageserver-d55dfcdfc-pmb2t\" (UID: \"14f31472-a674-46f3-a8db-faf5eeb91cb9\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.933590 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/604a371d-6aff-4eb9-85b7-3f426d1bdc5c-cert\") pod \"ingress-canary-g62xj\" (UID: \"604a371d-6aff-4eb9-85b7-3f426d1bdc5c\") " pod="openshift-ingress-canary/ingress-canary-g62xj" 
Jan 28 18:31:29 crc kubenswrapper[4767]: E0128 18:31:29.936232 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.436214592 +0000 UTC m=+96.400397466 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.936354 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/14f31472-a674-46f3-a8db-faf5eeb91cb9-tmpfs\") pod \"packageserver-d55dfcdfc-pmb2t\" (UID: \"14f31472-a674-46f3-a8db-faf5eeb91cb9\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.937452 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bcde5fe1-42de-449a-9743-2f313a33659a-config-volume\") pod \"collect-profiles-29493750-rcm4m\" (UID: \"bcde5fe1-42de-449a-9743-2f313a33659a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.937788 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/595c726d-77f3-427f-a098-2301abc05d3b-config-volume\") pod \"dns-default-4tbt4\" (UID: \"595c726d-77f3-427f-a098-2301abc05d3b\") " pod="openshift-dns/dns-default-4tbt4" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.937809 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/595c726d-77f3-427f-a098-2301abc05d3b-metrics-tls\") pod \"dns-default-4tbt4\" (UID: \"595c726d-77f3-427f-a098-2301abc05d3b\") " pod="openshift-dns/dns-default-4tbt4" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.939132 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cd5f7195-94fd-45d5-bf00-ceecdcb22ea0-bound-sa-token\") pod \"ingress-operator-5b745b69d9-v7787\" (UID: \"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.939261 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/121787da-7b66-4cec-af46-774c1d87cc40-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-9n4rf\" (UID: \"121787da-7b66-4cec-af46-774c1d87cc40\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.940316 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bcde5fe1-42de-449a-9743-2f313a33659a-secret-volume\") pod \"collect-profiles-29493750-rcm4m\" (UID: \"bcde5fe1-42de-449a-9743-2f313a33659a\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.940568 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/88b3dd6f-8bee-4f26-92b4-b9c79f847a6f-certs\") pod \"machine-config-server-t9n45\" (UID: \"88b3dd6f-8bee-4f26-92b4-b9c79f847a6f\") " pod="openshift-machine-config-operator/machine-config-server-t9n45" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.941269 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/14f31472-a674-46f3-a8db-faf5eeb91cb9-apiservice-cert\") pod \"packageserver-d55dfcdfc-pmb2t\" (UID: \"14f31472-a674-46f3-a8db-faf5eeb91cb9\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.942332 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/88b3dd6f-8bee-4f26-92b4-b9c79f847a6f-node-bootstrap-token\") pod \"machine-config-server-t9n45\" (UID: \"88b3dd6f-8bee-4f26-92b4-b9c79f847a6f\") " pod="openshift-machine-config-operator/machine-config-server-t9n45" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.948386 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/14f31472-a674-46f3-a8db-faf5eeb91cb9-webhook-cert\") pod \"packageserver-d55dfcdfc-pmb2t\" (UID: \"14f31472-a674-46f3-a8db-faf5eeb91cb9\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.955597 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/604a371d-6aff-4eb9-85b7-3f426d1bdc5c-cert\") pod \"ingress-canary-g62xj\" (UID: \"604a371d-6aff-4eb9-85b7-3f426d1bdc5c\") " pod="openshift-ingress-canary/ingress-canary-g62xj" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.961225 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-swrx9\" (UniqueName: \"kubernetes.io/projected/583ee7da-190f-4eae-813d-e195a64c236f-kube-api-access-swrx9\") pod \"cluster-samples-operator-665b6dd947-8st5n\" (UID: \"583ee7da-190f-4eae-813d-e195a64c236f\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.966379 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vsxcm" Jan 28 18:31:29 crc kubenswrapper[4767]: I0128 18:31:29.971573 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ss686\" (UniqueName: \"kubernetes.io/projected/b57f864b-8ab2-499a-a47e-b4a4c62842e7-kube-api-access-ss686\") pod \"marketplace-operator-79b997595-gxsvp\" (UID: \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\") " pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.016309 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpwr4\" (UniqueName: \"kubernetes.io/projected/88b3dd6f-8bee-4f26-92b4-b9c79f847a6f-kube-api-access-rpwr4\") pod \"machine-config-server-t9n45\" (UID: \"88b3dd6f-8bee-4f26-92b4-b9c79f847a6f\") " pod="openshift-machine-config-operator/machine-config-server-t9n45" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.031364 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8d95\" (UniqueName: \"kubernetes.io/projected/14f31472-a674-46f3-a8db-faf5eeb91cb9-kube-api-access-g8d95\") pod \"packageserver-d55dfcdfc-pmb2t\" (UID: \"14f31472-a674-46f3-a8db-faf5eeb91cb9\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.034337 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:30 crc kubenswrapper[4767]: E0128 18:31:30.034723 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.534705931 +0000 UTC m=+96.498888805 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.048715 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.054309 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r68xg\" (UniqueName: \"kubernetes.io/projected/121787da-7b66-4cec-af46-774c1d87cc40-kube-api-access-r68xg\") pod \"package-server-manager-789f6589d5-9n4rf\" (UID: \"121787da-7b66-4cec-af46-774c1d87cc40\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.055116 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.074809 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-586q4\" (UniqueName: \"kubernetes.io/projected/604a371d-6aff-4eb9-85b7-3f426d1bdc5c-kube-api-access-586q4\") pod \"ingress-canary-g62xj\" (UID: \"604a371d-6aff-4eb9-85b7-3f426d1bdc5c\") " pod="openshift-ingress-canary/ingress-canary-g62xj" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.093446 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzl57\" (UniqueName: \"kubernetes.io/projected/bcde5fe1-42de-449a-9743-2f313a33659a-kube-api-access-dzl57\") pod \"collect-profiles-29493750-rcm4m\" (UID: \"bcde5fe1-42de-449a-9743-2f313a33659a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.107538 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.120433 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pbbt4"] Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.128441 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2xzq\" (UniqueName: \"kubernetes.io/projected/595c726d-77f3-427f-a098-2301abc05d3b-kube-api-access-r2xzq\") pod \"dns-default-4tbt4\" (UID: \"595c726d-77f3-427f-a098-2301abc05d3b\") " pod="openshift-dns/dns-default-4tbt4" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.137087 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:30 crc kubenswrapper[4767]: E0128 18:31:30.137455 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.637440218 +0000 UTC m=+96.601623092 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.142735 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr"] Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.146104 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-rll48"] Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.161039 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc"] Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.181621 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.185982 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.199832 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.226017 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-rr2mf"] Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.228255 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-q6cnp"] Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.238694 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-g62xj" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.238744 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:30 crc kubenswrapper[4767]: E0128 18:31:30.239021 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.739005086 +0000 UTC m=+96.703187960 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:30 crc kubenswrapper[4767]: W0128 18:31:30.245476 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19bfb345_68d7_418c_9695_2842e8b9f53a.slice/crio-786ae33a21188b48097556d9db58fff9f75c7bb2d12cad06fceda26ec3d3eb5e WatchSource:0}: Error finding container 786ae33a21188b48097556d9db58fff9f75c7bb2d12cad06fceda26ec3d3eb5e: Status 404 returned error can't find the container with id 786ae33a21188b48097556d9db58fff9f75c7bb2d12cad06fceda26ec3d3eb5e Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.285704 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-58t9w"] Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.289708 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-t9n45" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.298449 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-4tbt4" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.340415 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:30 crc kubenswrapper[4767]: E0128 18:31:30.340835 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.840808983 +0000 UTC m=+96.804991857 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.349274 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7"] Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.384607 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.444974 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:30 crc kubenswrapper[4767]: E0128 18:31:30.445435 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.945407819 +0000 UTC m=+96.909590693 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.445587 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:30 crc kubenswrapper[4767]: E0128 18:31:30.445852 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:30.945844553 +0000 UTC m=+96.910027427 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.453055 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" event={"ID":"19bfb345-68d7-418c-9695-2842e8b9f53a","Type":"ContainerStarted","Data":"786ae33a21188b48097556d9db58fff9f75c7bb2d12cad06fceda26ec3d3eb5e"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.457947 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf" event={"ID":"c9eb3f6d-70d5-4250-90e7-1046f1c1d370","Type":"ContainerStarted","Data":"98772a33b04b89b13cf8b53a10d3ef2d4ea6e5ab1a00a329ce1190dc971ef19a"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.461717 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd" event={"ID":"302a671f-1c82-402b-9450-d27a1566dc3f","Type":"ContainerStarted","Data":"6cdfd3d435bb5d30a0ed954d0175e958b7fa02dc517fb303226e7e914a7f9ab8"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.475222 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" event={"ID":"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0","Type":"ContainerStarted","Data":"75e07472e49ac1c56f0714f46d8080b4fc7f0ef9d1f4762e4f6a35dd00d2a729"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.490526 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" event={"ID":"65d6565f-d0b8-441e-8f17-293368650b57","Type":"ContainerStarted","Data":"585e7c36cfd0df16a83e136b99c9257c208b04934ab583f52b18b9c61c31073c"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.510617 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" event={"ID":"a5c87c48-f06a-4f35-a336-2d74a88c40ac","Type":"ContainerStarted","Data":"4dd5861936114db2f87774c10514331c2ecee4dbc785e6e5418f75e6e10e3a77"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.519010 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" event={"ID":"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec","Type":"ContainerStarted","Data":"6b34ae3ae551f24c7b43133f5facb6b2e0cb0007a5164e5261d184dc15364308"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.519066 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" event={"ID":"05a4d5bc-8b9a-4742-b21d-ba3e7229a2ec","Type":"ContainerStarted","Data":"f5f5f40fa9b67931aca702ea3d485d8a8a21daadec44f3ba9df32bee1941772f"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.523384 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" podStartSLOduration=72.523342472 podStartE2EDuration="1m12.523342472s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:30.518295611 +0000 UTC m=+96.482478495" watchObservedRunningTime="2026-01-28 18:31:30.523342472 +0000 UTC m=+96.487525336" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.523512 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-q6cnp" event={"ID":"b8db25ab-2803-4342-a0a3-c1bd5a44ba2e","Type":"ContainerStarted","Data":"a365089df8bd01980253be6ee591cd09a0ee0d65fb60924339578b738c4da449"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.543491 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s" event={"ID":"e10737ed-6d67-4dd1-9529-d46265d7cb29","Type":"ContainerStarted","Data":"6b74335600969d801b2f26d9ade9d1ede70df225e142bafa5d675e4f9aa08a37"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.545639 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" event={"ID":"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3","Type":"ContainerStarted","Data":"1dda01eb5191d42125cabd54198b002939c253e0c7e397d2e669d0fdf797db9b"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.545981 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:30 crc kubenswrapper[4767]: E0128 18:31:30.546624 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:31.046606816 +0000 UTC m=+97.010789690 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.546740 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:30 crc kubenswrapper[4767]: E0128 18:31:30.547198 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:31.047181424 +0000 UTC m=+97.011364328 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.554538 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" event={"ID":"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc","Type":"ContainerStarted","Data":"a0a5dfc629efec00e0ff9f416b43f957e11827ba0033400ad877006c6d29efd6"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.563876 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp" event={"ID":"e43f862d-aa02-45c1-ab71-9b8235aacd37","Type":"ContainerStarted","Data":"d623ed8b9c24a094845fb84f7d036f63dc701d5ad12f13531033971409858411"} Jan 28 18:31:30 crc kubenswrapper[4767]: W0128 18:31:30.568825 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbef36296_0ea5_4316_a31a_c14346fc1597.slice/crio-22f19de71ae5c06be1d2dd0e41406f2a25269cac82d5c9fe678b2100382c48a8 WatchSource:0}: Error finding container 22f19de71ae5c06be1d2dd0e41406f2a25269cac82d5c9fe678b2100382c48a8: Status 404 returned error can't find the container with id 22f19de71ae5c06be1d2dd0e41406f2a25269cac82d5c9fe678b2100382c48a8 Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.569158 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qfdzz" event={"ID":"1ac82155-1d09-4371-869a-e7edb9c4d5bc","Type":"ContainerStarted","Data":"a50f98706b484dedf9b4a0cb1279c7c96f9ba1d38115aee7a317e075bb1a5eeb"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.571967 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" event={"ID":"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a","Type":"ContainerStarted","Data":"6bec9b5e01fffc7b3d852e96e63051e7cf48c8850f0d5ed113bd3f53fa4649a7"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.572496 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.573670 4767 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-24jzn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.573700 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.599130 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-b7z9l" 
event={"ID":"9414071e-c859-4b9b-a1b2-51026b6886d7","Type":"ContainerStarted","Data":"c49c50fd3624673d065aeea89d261554ce85578f6644d1919278796947f30a09"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.604286 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" event={"ID":"30268a6d-2d26-4c9e-8692-65dd9e6d75e1","Type":"ContainerStarted","Data":"36b56c5d4e75e69b70381ef8dd62c8c2078f57b63fcc9373d7d7e46cb4715fd5"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.605556 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-ttsxj" event={"ID":"2c588228-398e-4f20-bf14-03450184cc20","Type":"ContainerStarted","Data":"b19d2ccf5a58f1aecdff6a9e5d09817918e4bd5f12ade1fb7c5d2c9785a0bc4d"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.606887 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv" event={"ID":"72d10cb6-6739-48fc-a732-8e8981bf80c9","Type":"ContainerStarted","Data":"b2c4f9045c363c45bbc99ae053ab2a2ec4c324fb4fa99ba2b7bffd019650bf52"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.611470 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr" event={"ID":"cb68a9be-651d-4412-a679-b8da3905f2dc","Type":"ContainerStarted","Data":"3a45c24dbfc1861097345525d4703e7775b2b5c66b064d22e32baea6e90247ff"} Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.629930 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-cwpk4" podStartSLOduration=74.629909091 podStartE2EDuration="1m14.629909091s" podCreationTimestamp="2026-01-28 18:30:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:30.627615848 +0000 UTC m=+96.591798732" watchObservedRunningTime="2026-01-28 18:31:30.629909091 +0000 UTC m=+96.594091965" Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.647846 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:30 crc kubenswrapper[4767]: E0128 18:31:30.743956 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:31.243907057 +0000 UTC m=+97.208089941 (durationBeforeRetry 500ms). 
Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.760553 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:30 crc kubenswrapper[4767]: E0128 18:31:30.762947 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:31.262930206 +0000 UTC m=+97.227113080 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.798912 4767 csr.go:261] certificate signing request csr-z9d7c is approved, waiting to be issued
Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.805913 4767 csr.go:257] certificate signing request csr-z9d7c is issued
Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.835590 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-n85dl"]
Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.840516 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw"]
Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.846043 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz"]
Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.847960 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74"]
[18:31:30.863 through 18:31:31.578: the UnmountVolume.TearDown / MountVolume.MountDevice failure pair above repeats six more times each — unmount attempts at 18:31:30.863, 18:31:30.964, 18:31:31.167, 18:31:31.268, 18:31:31.370, 18:31:31.476; mount attempts at 18:31:30.863, 18:31:31.065, 18:31:31.167, 18:31:31.269, 18:31:31.373, 18:31:31.577 — always with the same "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers" error and a 500ms durationBeforeRetry, interleaved with the records below.]
Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.902136 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-vqr6q" podStartSLOduration=72.902118909 podStartE2EDuration="1m12.902118909s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:30.874787644 +0000 UTC m=+96.838970518" watchObservedRunningTime="2026-01-28 18:31:30.902118909 +0000 UTC m=+96.866301783"
Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.903764 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-kx6dk"]
Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.905376 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-4svb7"]
Jan 28 18:31:30 crc kubenswrapper[4767]: I0128 18:31:30.958214 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-vsxcm"]
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.426001 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-g62xj"]
Jan 28 18:31:31 crc kubenswrapper[4767]: W0128 18:31:31.443334 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod604a371d_6aff_4eb9_85b7_3f426d1bdc5c.slice/crio-45aef99589be3fdd53254119bf45c0bb01d2126c7def0901bab3b3354ae57612 WatchSource:0}: Error finding container 45aef99589be3fdd53254119bf45c0bb01d2126c7def0901bab3b3354ae57612: Status 404 returned error can't find the container with id 45aef99589be3fdd53254119bf45c0bb01d2126c7def0901bab3b3354ae57612
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.452810 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gxsvp"]
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.619827 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vsxcm" event={"ID":"921a23b8-84dd-478c-b1f8-3e9f812d79cb","Type":"ContainerStarted","Data":"8deb940eaca8ea9202ebc8354300b299d3c3a036ac1ff85434319b8fb23153cd"}
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.622737 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd" event={"ID":"302a671f-1c82-402b-9450-d27a1566dc3f","Type":"ContainerStarted","Data":"adc58f17be00eabde5af7b9cb1869f8db42475227dd6abd56c2183caad62d04c"}
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.639037 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t"]
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.648513 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" event={"ID":"c5e837ca-e053-4dc1-9e57-ddc248ba1cd3","Type":"ContainerStarted","Data":"4117b760e0a5e02d58d7ed0e386afc48ad14b66a297ce5f25628efdb6fb6006a"}
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.655969 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n"]
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.672258 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m"]
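[Annotation: each failed attempt above ends with "No retries permitted until <t+500ms> (durationBeforeRetry 500ms)". The kubelet's nestedpendingoperations layer serializes operations per volume and refuses to restart one until its backoff expires, which is why the same pair of errors reappears on a roughly half-second cadence. An illustrative retry loop in the same spirit — a sketch, not the kubelet's implementation; the fixed 500ms simply mirrors the value in this log:]

// backoff_sketch.go — illustrates the "no retries permitted until ..."
// pattern: a failed volume operation only schedules its next attempt after
// a backoff delay, so an unchanged error recurs at fixed intervals.
package main

import (
	"errors"
	"fmt"
	"time"
)

var errDriverNotRegistered = errors.New(
	"driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers")

// mountDevice stands in for attacher.MountDevice; it fails until the CSI
// node plugin has registered (hypothetical stand-in for illustration).
func mountDevice(registered bool) error {
	if !registered {
		return errDriverNotRegistered
	}
	return nil
}

func main() {
	backoff := 500 * time.Millisecond // matches durationBeforeRetry in the log
	for attempt := 1; attempt <= 3; attempt++ {
		if err := mountDevice(false); err != nil {
			next := time.Now().Add(backoff)
			fmt.Printf("attempt %d failed: %v; no retries permitted until %s\n",
				attempt, err, next.Format(time.RFC3339Nano))
			time.Sleep(backoff)
			continue
		}
		fmt.Println("mounted")
		return
	}
}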
event={"ID":"72d10cb6-6739-48fc-a732-8e8981bf80c9","Type":"ContainerStarted","Data":"a1cbfc1d3516062425e481d54fef001c98a8ebc60b66e9044ad7ea2d733dd428"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.679554 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.680033 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-7jkqc" podStartSLOduration=73.680021851 podStartE2EDuration="1m13.680021851s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:31.674428303 +0000 UTC m=+97.638611177" watchObservedRunningTime="2026-01-28 18:31:31.680021851 +0000 UTC m=+97.644204725" Jan 28 18:31:31 crc kubenswrapper[4767]: E0128 18:31:31.680969 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:32.180953741 +0000 UTC m=+98.145136615 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.690170 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-b7z9l" event={"ID":"9414071e-c859-4b9b-a1b2-51026b6886d7","Type":"ContainerStarted","Data":"00092f77e127076a8042f8d464372fcf6cbafbd184191ca9dab524b3ffcd078e"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.690759 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-b7z9l" Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.693381 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-4tbt4"] Jan 28 18:31:31 crc kubenswrapper[4767]: W0128 18:31:31.693812 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod14f31472_a674_46f3_a8db_faf5eeb91cb9.slice/crio-adedcd58925894707481db2ef23a6f6e25c04d588f976eaef5df290fe322f8ad WatchSource:0}: Error finding container adedcd58925894707481db2ef23a6f6e25c04d588f976eaef5df290fe322f8ad: Status 404 returned error can't find the container with id adedcd58925894707481db2ef23a6f6e25c04d588f976eaef5df290fe322f8ad Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.694177 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" 
event={"ID":"67ab6cf3-b8b2-4adb-a126-b0aa842964e8","Type":"ContainerStarted","Data":"1311bc6404db8039fe7d84fde24b246ad7fdd4d095b88a2f5a69c22bca905795"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.694991 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-5h8dc"] Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.698380 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-t9n45" event={"ID":"88b3dd6f-8bee-4f26-92b4-b9c79f847a6f","Type":"ContainerStarted","Data":"d8eedfa82cd71441490f613fab40bc9812c1f1676ff9e741984dc1fc937fd8fa"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.699704 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-4svb7" event={"ID":"bfa657b3-28e7-4503-a332-a2cd83725356","Type":"ContainerStarted","Data":"354b6a37491ae9ea1f68ff04557c37012437b6cf0c0e55d11a643be7cfab3100"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.702517 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf"] Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.702638 4767 patch_prober.go:28] interesting pod/console-operator-58897d9998-b7z9l container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.702667 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-b7z9l" podUID="9414071e-c859-4b9b-a1b2-51026b6886d7" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.704770 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" event={"ID":"30268a6d-2d26-4c9e-8692-65dd9e6d75e1","Type":"ContainerStarted","Data":"effb533473e1abdb864aa58671cfbb6b9f982c8c204204f95fb5776eab4ead40"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.707155 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" event={"ID":"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0","Type":"ContainerStarted","Data":"21059a77fb73980c706f8a6cc8ddd4a644d2d6c8772834feb1043e3d76c01f65"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.707516 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.710353 4767 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-vx7rs container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body= Jan 28 18:31:31 crc kubenswrapper[4767]: W0128 18:31:31.710803 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbcde5fe1_42de_449a_9743_2f313a33659a.slice/crio-1e20de379fc231bd77b7642446a4a8bfca561e2a6f19127b14f90a36af0f95dd WatchSource:0}: 
Error finding container 1e20de379fc231bd77b7642446a4a8bfca561e2a6f19127b14f90a36af0f95dd: Status 404 returned error can't find the container with id 1e20de379fc231bd77b7642446a4a8bfca561e2a6f19127b14f90a36af0f95dd Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.711004 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bgjcd" podStartSLOduration=73.710990802 podStartE2EDuration="1m13.710990802s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:31.710553628 +0000 UTC m=+97.674736502" watchObservedRunningTime="2026-01-28 18:31:31.710990802 +0000 UTC m=+97.675173676" Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.711102 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-ttsxj" event={"ID":"2c588228-398e-4f20-bf14-03450184cc20","Type":"ContainerStarted","Data":"95f5bb3b4e01e8c8caf232e5a87ec0128e411f64583f391c37b1430f8e511a2f"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.711129 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" podUID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.722888 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" event={"ID":"0c90af26-b1fa-4faf-bbba-903dc47f7a46","Type":"ContainerStarted","Data":"95e818a040c624eaf808c98a47665721c594f77bb9c3d18b49d1ef90fad71bfa"} Jan 28 18:31:31 crc kubenswrapper[4767]: W0128 18:31:31.729082 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod121787da_7b66_4cec_af46_774c1d87cc40.slice/crio-6bfbab9a7b453aa6c0f31ee4769affe707d4d58ae70e2b246ae5c084c0285789 WatchSource:0}: Error finding container 6bfbab9a7b453aa6c0f31ee4769affe707d4d58ae70e2b246ae5c084c0285789: Status 404 returned error can't find the container with id 6bfbab9a7b453aa6c0f31ee4769affe707d4d58ae70e2b246ae5c084c0285789 Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.729822 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-kx6dk" event={"ID":"fd30d9e8-8c0c-4db4-8923-7a7a03e1df31","Type":"ContainerStarted","Data":"bf5ea060fb084d6a8006b3ba403e3f0d90b8db436532c47e69ebea324e11b7b0"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.731086 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74" event={"ID":"5cdebe9f-d5ed-4fd3-9648-6cea02821835","Type":"ContainerStarted","Data":"d83e2115c70086511f5c2a2991fcd170e19785edb1cd29ba238c0c53bfd39066"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.733929 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qfdzz" event={"ID":"1ac82155-1d09-4371-869a-e7edb9c4d5bc","Type":"ContainerStarted","Data":"b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.735978 4767 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w" event={"ID":"50052522-e0ba-4c6f-9b01-441578281afa","Type":"ContainerStarted","Data":"56c43e33c022749c82b8f04081428427ea15277ca364c368a1dba3a8cdb5a06d"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.738241 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-g62xj" event={"ID":"604a371d-6aff-4eb9-85b7-3f426d1bdc5c","Type":"ContainerStarted","Data":"45aef99589be3fdd53254119bf45c0bb01d2126c7def0901bab3b3354ae57612"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.740275 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" event={"ID":"a15f9dac-80ce-424b-a29e-a0ffaaa7e8fc","Type":"ContainerStarted","Data":"79d9c7fd4609e7333616b1cdf9a76598fa55b47f064b9a90572f7db753c9dd67"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.742003 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7" event={"ID":"bef36296-0ea5-4316-a31a-c14346fc1597","Type":"ContainerStarted","Data":"22f19de71ae5c06be1d2dd0e41406f2a25269cac82d5c9fe678b2100382c48a8"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.743098 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" event={"ID":"b57f864b-8ab2-499a-a47e-b4a4c62842e7","Type":"ContainerStarted","Data":"df375098e14acb6e947cc1da2ce4705c5e16f79c0372d8d3185e2c36a839bed2"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.744480 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s" event={"ID":"e10737ed-6d67-4dd1-9529-d46265d7cb29","Type":"ContainerStarted","Data":"7d6062391af37e3dc9183e3dec4821777ac6dc3a47b3655d805a32647410a98c"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.745766 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf" event={"ID":"c9eb3f6d-70d5-4250-90e7-1046f1c1d370","Type":"ContainerStarted","Data":"1c82e895a551909b92420194f63d50a031d3d6f2c8871ddec281eba1166f92d2"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.746756 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw" event={"ID":"389ef0a8-f13c-4eb7-8293-cecaff735697","Type":"ContainerStarted","Data":"8ddf1ad6fc75cccf5441f6bd623299f69e5bbb9f812c753e00da4fe470a68f33"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.754481 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" event={"ID":"dbc51af2-c004-4115-a615-9d98b418c70f","Type":"ContainerStarted","Data":"5c7d44237d9ed00b223372305921f93e8dc600d9b5a936e472508087e303d91a"} Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.756610 4767 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-24jzn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.756674 4767 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.781716 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:31 crc kubenswrapper[4767]: E0128 18:31:31.787635 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:32.287616372 +0000 UTC m=+98.251799246 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.787186 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-vc67h" podStartSLOduration=74.787167488 podStartE2EDuration="1m14.787167488s" podCreationTimestamp="2026-01-28 18:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:31.750876607 +0000 UTC m=+97.715059481" watchObservedRunningTime="2026-01-28 18:31:31.787167488 +0000 UTC m=+97.751350362" Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.791216 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-v7787"] Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.794702 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" podStartSLOduration=73.794687289 podStartE2EDuration="1m13.794687289s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:31.793545872 +0000 UTC m=+97.757728746" watchObservedRunningTime="2026-01-28 18:31:31.794687289 +0000 UTC m=+97.758870163" Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.812171 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-28 18:26:30 +0000 UTC, rotation deadline is 2026-11-26 03:37:05.363047426 +0000 UTC Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.812320 4767 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 7233h5m33.550730754s for next certificate rotation Jan 28 18:31:31 crc kubenswrapper[4767]: W0128 18:31:31.854551 4767 manager.go:1169] Failed to process watch event {EventType:0 
Jan 28 18:31:31 crc kubenswrapper[4767]: W0128 18:31:31.854551 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd5f7195_94fd_45d5_bf00_ceecdcb22ea0.slice/crio-d5cbeeffd4e233ec2de722f9716ae97e416e2a7bb6dea15877983e4eba5406c8 WatchSource:0}: Error finding container d5cbeeffd4e233ec2de722f9716ae97e416e2a7bb6dea15877983e4eba5406c8: Status 404 returned error can't find the container with id d5cbeeffd4e233ec2de722f9716ae97e416e2a7bb6dea15877983e4eba5406c8
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.873586 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-b2rv8" podStartSLOduration=73.873567963 podStartE2EDuration="1m13.873567963s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:31.873102318 +0000 UTC m=+97.837285192" watchObservedRunningTime="2026-01-28 18:31:31.873567963 +0000 UTC m=+97.837750837"
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.874969 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" podStartSLOduration=73.874960427 podStartE2EDuration="1m13.874960427s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:31.840355389 +0000 UTC m=+97.804538263" watchObservedRunningTime="2026-01-28 18:31:31.874960427 +0000 UTC m=+97.839143301"
[18:31:31.890 through 18:31:32.299: the UnmountVolume.TearDown / MountVolume.MountDevice failure pair keeps cycling — unmount attempts at 18:31:31.890, 18:31:32.094, 18:31:32.197; mount attempts at 18:31:31.992, 18:31:32.094, 18:31:32.299 — same "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers" error and 500ms backoff each time, interleaved with the records below.]
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.910708 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-ttsxj" podStartSLOduration=73.910690249 podStartE2EDuration="1m13.910690249s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:31.908351915 +0000 UTC m=+97.872534799" watchObservedRunningTime="2026-01-28 18:31:31.910690249 +0000 UTC m=+97.874873123"
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.934868 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g"
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.935651 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g"
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.940480 4767 patch_prober.go:28] interesting pod/apiserver-7bbb656c7d-svn7g container/oauth-apiserver namespace/openshift-oauth-apiserver: Startup probe status=failure output="Get \"https://10.217.0.9:8443/livez\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body=
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.940517 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" podUID="c9ceeaa1-d900-4c38-89ef-31abebe17be9" containerName="oauth-apiserver" probeResult="failure" output="Get \"https://10.217.0.9:8443/livez\": dial tcp 10.217.0.9:8443: connect: connection refused"
Jan 28 18:31:31 crc kubenswrapper[4767]: I0128 18:31:31.951853 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-b7z9l" podStartSLOduration=73.951839036 podStartE2EDuration="1m13.951839036s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:31.950614997 +0000 UTC m=+97.914797891" watchObservedRunningTime="2026-01-28 18:31:31.951839036 +0000 UTC m=+97.916021930"
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.319545 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-ttsxj"
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.321296 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.321332 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
[18:31:32.400 through 18:31:32.705: the unmount/mount failure cycle continues — unmount attempts at 18:31:32.400, 18:31:32.603; mount attempts at 18:31:32.502, 18:31:32.705 — identical errors, 500ms backoff each time.]
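[Annotation: the probe failures threaded through this window (controller-manager, console-operator, route-controller-manager, oauth-apiserver, the default router) all share one symptom: the kubelet dials the pod's health endpoint before the process is listening, gets "connection refused", and keeps the pod unready until a later probe succeeds. A rough stand-alone equivalent of such an HTTP probe — illustrative only; the one-second timeout and the skipped TLS verification are assumptions, not kubelet configuration read from this log:]

// probe_sketch.go — a minimal HTTP health probe in the spirit of the
// kubelet's readiness/startup probes failing above.
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// probe GETs the endpoint with a short timeout and treats any dial error
// or non-2xx/3xx status as a failure, like the probeResult lines above.
func probe(url string) error {
	client := &http.Client{
		Timeout: time.Second,
		Transport: &http.Transport{
			// The operator endpoints here serve self-signed certificates,
			// so verification is skipped in this sketch (an assumption).
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "dial tcp 10.217.0.7:8443: connect: connection refused"
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	fmt.Println(probe("https://10.217.0.7:8443/healthz"))
}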
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.760324 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp" event={"ID":"e43f862d-aa02-45c1-ab71-9b8235aacd37","Type":"ContainerStarted","Data":"7d217dde8385d1d93bd0d568212e516a2f6f29acc7025380c2098a7494b51b5c"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.763628 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" event={"ID":"121787da-7b66-4cec-af46-774c1d87cc40","Type":"ContainerStarted","Data":"6bfbab9a7b453aa6c0f31ee4769affe707d4d58ae70e2b246ae5c084c0285789"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.764643 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-q6cnp" event={"ID":"b8db25ab-2803-4342-a0a3-c1bd5a44ba2e","Type":"ContainerStarted","Data":"c841116a3a0f942c52b72ca49d0a089fb9750414800acde3eb34725d12e6867f"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.766487 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" event={"ID":"0c90af26-b1fa-4faf-bbba-903dc47f7a46","Type":"ContainerStarted","Data":"c5c6a02e4cc2470b0307c2656dfa71c30158a605998de99dd19e667366aa5416"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.767317 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" event={"ID":"ecf41ec8-6c32-4b5c-a157-f52cc4de995a","Type":"ContainerStarted","Data":"ea6f9dc121c10a6cc1b829725eba8c2337f3d417a2013f138fa63f5aa8d2f6d0"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.768452 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw" event={"ID":"389ef0a8-f13c-4eb7-8293-cecaff735697","Type":"ContainerStarted","Data":"5b4586dd29f045188e2bf9374a6c9613f65c74957b0b7443c6c5756f66554629"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.769752 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787" event={"ID":"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0","Type":"ContainerStarted","Data":"d5cbeeffd4e233ec2de722f9716ae97e416e2a7bb6dea15877983e4eba5406c8"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.770800 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr" event={"ID":"cb68a9be-651d-4412-a679-b8da3905f2dc","Type":"ContainerStarted","Data":"469bf374838f98c7c19f4de1d89bbda6805ea1691d56041eb741b6616b723f0f"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.771764 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" event={"ID":"19bfb345-68d7-418c-9695-2842e8b9f53a","Type":"ContainerStarted","Data":"22fd341db91feddfe7e4f419c34a1691d5db727cef9ea26044fb798c33ec75b0"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.772666 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-4tbt4" event={"ID":"595c726d-77f3-427f-a098-2301abc05d3b","Type":"ContainerStarted","Data":"f14bbcfbb5a93194db0ced5826667fc4a3761ae287e6d45e66acd29643a4aecf"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.773458 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" event={"ID":"14f31472-a674-46f3-a8db-faf5eeb91cb9","Type":"ContainerStarted","Data":"adedcd58925894707481db2ef23a6f6e25c04d588f976eaef5df290fe322f8ad"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.774580 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-4svb7" event={"ID":"bfa657b3-28e7-4503-a332-a2cd83725356","Type":"ContainerStarted","Data":"2f2a16009a88807ccf22b309c351fdb4ecbdbc775284d08ffb87ac5e96577025"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.775559 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" event={"ID":"dbc51af2-c004-4115-a615-9d98b418c70f","Type":"ContainerStarted","Data":"e8ce85ce864897c93a825838119c9a511098093d142bcc8bec4d351a7cc442e7"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.776276 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" event={"ID":"bcde5fe1-42de-449a-9743-2f313a33659a","Type":"ContainerStarted","Data":"1e20de379fc231bd77b7642446a4a8bfca561e2a6f19127b14f90a36af0f95dd"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.777298 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" event={"ID":"a5c87c48-f06a-4f35-a336-2d74a88c40ac","Type":"ContainerStarted","Data":"7d7ba4d9c16ded0590c50d499150efab9253643be033ab4652e15e47709aa4f1"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.778417 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7" event={"ID":"bef36296-0ea5-4316-a31a-c14346fc1597","Type":"ContainerStarted","Data":"cbca7ba654047997b2f86cd6006ef908adb5bf7e65208bd938b2ac754def00f5"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.779455 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74" event={"ID":"5cdebe9f-d5ed-4fd3-9648-6cea02821835","Type":"ContainerStarted","Data":"21750bc0f393f4a91ffd25cda24ddbc7f993fc7dfa92c5f2912c3824ab7a6cc3"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.780484 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-t9n45" event={"ID":"88b3dd6f-8bee-4f26-92b4-b9c79f847a6f","Type":"ContainerStarted","Data":"9fe27dcb2ec7ae8bb3c65e7530e3e7403eb71546764d8d0daa916e7e192bd2cf"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.782017 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w" event={"ID":"50052522-e0ba-4c6f-9b01-441578281afa","Type":"ContainerStarted","Data":"dbe982dfe6fa51b090a71af4a4d109e35dc155f48f7a56e7853cae92cf45d07f"}
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.785993 4767 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-24jzn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.786035 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused"
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.786231 4767 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-vx7rs container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.786238 4767 patch_prober.go:28] interesting pod/console-operator-58897d9998-b7z9l container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body=
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.786257 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" podUID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused"
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.786274 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-b7z9l" podUID="9414071e-c859-4b9b-a1b2-51026b6886d7" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused"
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.787825 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-lpxgp" podStartSLOduration=74.787805166 podStartE2EDuration="1m14.787805166s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:32.78541035 +0000 UTC m=+98.749593234" watchObservedRunningTime="2026-01-28 18:31:32.787805166 +0000 UTC m=+98.751988040"
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.805966 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:32 crc kubenswrapper[4767]: E0128 18:31:32.808170 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:33.308149227 +0000 UTC m=+99.272332111 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.817098 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-qfdzz" podStartSLOduration=74.817081283 podStartE2EDuration="1m14.817081283s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:32.816140533 +0000 UTC m=+98.780323407" watchObservedRunningTime="2026-01-28 18:31:32.817081283 +0000 UTC m=+98.781264157"
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.859484 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-lq7cv" podStartSLOduration=75.859462788 podStartE2EDuration="1m15.859462788s" podCreationTimestamp="2026-01-28 18:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:32.856695119 +0000 UTC m=+98.820877993" watchObservedRunningTime="2026-01-28 18:31:32.859462788 +0000 UTC m=+98.823645662"
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.909905 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:32 crc kubenswrapper[4767]: E0128 18:31:32.910458 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:33.410442349 +0000 UTC m=+99.374625223 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:32 crc kubenswrapper[4767]: I0128 18:31:32.936432 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-5vd7s" podStartSLOduration=74.93640719 podStartE2EDuration="1m14.93640719s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:32.935703027 +0000 UTC m=+98.899885901" watchObservedRunningTime="2026-01-28 18:31:32.93640719 +0000 UTC m=+98.900590074"
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.011604 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.011777 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:33.511747919 +0000 UTC m=+99.475930793 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.011897 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.012195 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:33.512187574 +0000 UTC m=+99.476370498 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.112888 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.113037 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:33.613016679 +0000 UTC m=+99.577199543 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.113180 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.113465 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:33.613457653 +0000 UTC m=+99.577640527 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.214364 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.214598 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:33.714568137 +0000 UTC m=+99.678751011 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.214740 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.215092 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:33.715076323 +0000 UTC m=+99.679259207 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.315518 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.316017 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:33.815997631 +0000 UTC m=+99.780180505 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.321015 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body=
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.321060 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused"
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.416846 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.417450 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:33.917430856 +0000 UTC m=+99.881613780 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.518193 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.518438 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.018406926 +0000 UTC m=+99.982589810 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.518721 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.519003 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.018992195 +0000 UTC m=+99.983175069 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.625924 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.626110 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.12606903 +0000 UTC m=+100.090251904 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.626363 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.626656 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.126645148 +0000 UTC m=+100.090828012 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.727355 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.727506 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.227471683 +0000 UTC m=+100.191654547 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.727604 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.727941 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.227929128 +0000 UTC m=+100.192112002 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.798536 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" event={"ID":"b57f864b-8ab2-499a-a47e-b4a4c62842e7","Type":"ContainerStarted","Data":"533e08262d8663a6db96c61b4deefb0a798033bb5ec7413e4e044b61c4d094f0"}
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.799705 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787" event={"ID":"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0","Type":"ContainerStarted","Data":"369aca47976e435d6226598aff88676c4f1cf0680ded312e898498c4ddaa8351"}
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.800501 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n" event={"ID":"583ee7da-190f-4eae-813d-e195a64c236f","Type":"ContainerStarted","Data":"acce074a841f52a6778fbb6ae60b515f1ac9167ae20b87e230a8cc5604af9b90"}
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.801604 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vsxcm" event={"ID":"921a23b8-84dd-478c-b1f8-3e9f812d79cb","Type":"ContainerStarted","Data":"320b9160ee4508c3ad8735190f7452e3b851ca2ed60b000b725a00745fded2f1"}
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.802472 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" event={"ID":"14f31472-a674-46f3-a8db-faf5eeb91cb9","Type":"ContainerStarted","Data":"4b5bad99e8a910084f7cc61d22db4324813eef234d8455afd2e7be919c8d2cf5"}
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.803329 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-g62xj" event={"ID":"604a371d-6aff-4eb9-85b7-3f426d1bdc5c","Type":"ContainerStarted","Data":"1d02bdefa5a907bd62ddf22e454617207f9a596b96e05f87a014ca1582e9bcea"}
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.805836 4767 generic.go:334] "Generic (PLEG): container finished" podID="dbc51af2-c004-4115-a615-9d98b418c70f" containerID="e8ce85ce864897c93a825838119c9a511098093d142bcc8bec4d351a7cc442e7" exitCode=0
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.806075 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" event={"ID":"dbc51af2-c004-4115-a615-9d98b418c70f","Type":"ContainerDied","Data":"e8ce85ce864897c93a825838119c9a511098093d142bcc8bec4d351a7cc442e7"}
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.807927 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" event={"ID":"bcde5fe1-42de-449a-9743-2f313a33659a","Type":"ContainerStarted","Data":"687db6835b47ef59bf0a8c5a29178c8469635231464e234ae28e76c219f66f33"}
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.809297 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" event={"ID":"121787da-7b66-4cec-af46-774c1d87cc40","Type":"ContainerStarted","Data":"40237f1ed577481962e7222b4cd0a2221311240012fb09ad84b8127b2c3c0e8f"}
pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" event={"ID":"121787da-7b66-4cec-af46-774c1d87cc40","Type":"ContainerStarted","Data":"40237f1ed577481962e7222b4cd0a2221311240012fb09ad84b8127b2c3c0e8f"} Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.814426 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" event={"ID":"67ab6cf3-b8b2-4adb-a126-b0aa842964e8","Type":"ContainerStarted","Data":"3b3cf759056656b3b97289f69ba818bf1697bbad3809f2996ce8c9fd6b069ebe"} Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.815873 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-kx6dk" event={"ID":"fd30d9e8-8c0c-4db4-8923-7a7a03e1df31","Type":"ContainerStarted","Data":"42f70b0486761f5f3a37b52c0a1100705763fc5ca0a0b3f482aa4cf6f50e9b7a"} Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.816936 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-4tbt4" event={"ID":"595c726d-77f3-427f-a098-2301abc05d3b","Type":"ContainerStarted","Data":"1a7f1fe54d37df27790e6bc911772d75389bcb8ba195f7390e972ec87a1abd86"} Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.818734 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" event={"ID":"30268a6d-2d26-4c9e-8692-65dd9e6d75e1","Type":"ContainerStarted","Data":"8541081ae9d8d90c5dc3c54e470bd54540357ffaa86643cc10df02be4f6582ea"} Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.819483 4767 patch_prober.go:28] interesting pod/console-operator-58897d9998-b7z9l container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" start-of-body= Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.819520 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-b7z9l" podUID="9414071e-c859-4b9b-a1b2-51026b6886d7" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.12:8443/readyz\": dial tcp 10.217.0.12:8443: connect: connection refused" Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.824633 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-rr2mf" podStartSLOduration=75.824617921 podStartE2EDuration="1m15.824617921s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:32.968511706 +0000 UTC m=+98.932694580" watchObservedRunningTime="2026-01-28 18:31:33.824617921 +0000 UTC m=+99.788800795" Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.828957 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.829084 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.329069993 +0000 UTC m=+100.293252867 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.829171 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.829465 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.329457385 +0000 UTC m=+100.293640259 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.840787 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-ctkl7" podStartSLOduration=75.840771297 podStartE2EDuration="1m15.840771297s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:33.838393051 +0000 UTC m=+99.802575925" watchObservedRunningTime="2026-01-28 18:31:33.840771297 +0000 UTC m=+99.804954171" Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.859889 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" podStartSLOduration=76.859874978 podStartE2EDuration="1m16.859874978s" podCreationTimestamp="2026-01-28 18:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:33.858108522 +0000 UTC m=+99.822291396" watchObservedRunningTime="2026-01-28 18:31:33.859874978 +0000 UTC m=+99.824057852" Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.879156 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" podStartSLOduration=76.879135934 podStartE2EDuration="1m16.879135934s" podCreationTimestamp="2026-01-28 18:30:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.892743 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr" podStartSLOduration=75.892717408 podStartE2EDuration="1m15.892717408s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:33.888559866 +0000 UTC m=+99.852742750" watchObservedRunningTime="2026-01-28 18:31:33.892717408 +0000 UTC m=+99.856900312"
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.905846 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-4ntrw" podStartSLOduration=75.905830139 podStartE2EDuration="1m15.905830139s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:33.904613779 +0000 UTC m=+99.868796663" watchObservedRunningTime="2026-01-28 18:31:33.905830139 +0000 UTC m=+99.870013013"
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.930579 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.930745 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.430723335 +0000 UTC m=+100.394906209 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.930783 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:33 crc kubenswrapper[4767]: E0128 18:31:33.931295 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.431276283 +0000 UTC m=+100.395459187 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.937665 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" podStartSLOduration=75.937646866 podStartE2EDuration="1m15.937646866s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:33.925083974 +0000 UTC m=+99.889266858" watchObservedRunningTime="2026-01-28 18:31:33.937646866 +0000 UTC m=+99.901829760"
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.939848 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-58t9w" podStartSLOduration=75.939835346 podStartE2EDuration="1m15.939835346s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:33.938314187 +0000 UTC m=+99.902497081" watchObservedRunningTime="2026-01-28 18:31:33.939835346 +0000 UTC m=+99.904018230"
Jan 28 18:31:33 crc kubenswrapper[4767]: I0128 18:31:33.956271 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-b2scv" podStartSLOduration=75.956247851 podStartE2EDuration="1m15.956247851s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:33.953993578 +0000 UTC m=+99.918176452" watchObservedRunningTime="2026-01-28 18:31:33.956247851 +0000 UTC m=+99.920430755"
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.034775 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:34 crc kubenswrapper[4767]: E0128 18:31:34.035094 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.535051552 +0000 UTC m=+100.499234426 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.035310 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:34 crc kubenswrapper[4767]: E0128 18:31:34.035655 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.53563888 +0000 UTC m=+100.499821754 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.136638 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:34 crc kubenswrapper[4767]: E0128 18:31:34.136929 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.636887269 +0000 UTC m=+100.601070133 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.238890 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:34 crc kubenswrapper[4767]: E0128 18:31:34.239462 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.73943788 +0000 UTC m=+100.703620754 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.324433 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 18:31:34 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld
Jan 28 18:31:34 crc kubenswrapper[4767]: [+]process-running ok
Jan 28 18:31:34 crc kubenswrapper[4767]: healthz check failed
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.324491 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.340854 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:34 crc kubenswrapper[4767]: E0128 18:31:34.341324 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.841306358 +0000 UTC m=+100.805489232 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.442278 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:34 crc kubenswrapper[4767]: E0128 18:31:34.442838 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:34.942808955 +0000 UTC m=+100.906991839 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.543387 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:34 crc kubenswrapper[4767]: E0128 18:31:34.543543 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:35.043522086 +0000 UTC m=+101.007704960 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.543721 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:34 crc kubenswrapper[4767]: E0128 18:31:34.544077 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:35.044067724 +0000 UTC m=+101.008250598 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.645045 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:34 crc kubenswrapper[4767]: E0128 18:31:34.645482 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:35.145463466 +0000 UTC m=+101.109646340 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.746141 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:34 crc kubenswrapper[4767]: E0128 18:31:34.746482 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:35.246466427 +0000 UTC m=+101.210649301 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.825609 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n" event={"ID":"583ee7da-190f-4eae-813d-e195a64c236f","Type":"ContainerStarted","Data":"f092798a3befeb88cae2d312ea8018deca853b2a6760b83161b31e662d05dfdf"}
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.825969 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-4svb7"
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.827714 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.827771 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.847273 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:34 crc kubenswrapper[4767]: E0128 18:31:34.847604 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:35.347585622 +0000 UTC m=+101.311768496 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:34 crc kubenswrapper[4767]: I0128 18:31:34.949640 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:34 crc kubenswrapper[4767]: E0128 18:31:34.950800 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:35.450785883 +0000 UTC m=+101.414968757 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.021495 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-nrw74" podStartSLOduration=77.021474654 podStartE2EDuration="1m17.021474654s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:35.017289701 +0000 UTC m=+100.981472595" watchObservedRunningTime="2026-01-28 18:31:35.021474654 +0000 UTC m=+100.985657538"
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.059939 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.060404 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:35.560387159 +0000 UTC m=+101.524570033 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.066638 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-g62xj" podStartSLOduration=9.066616418 podStartE2EDuration="9.066616418s" podCreationTimestamp="2026-01-28 18:31:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:35.06229078 +0000 UTC m=+101.026473664" watchObservedRunningTime="2026-01-28 18:31:35.066616418 +0000 UTC m=+101.030799292" Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.087140 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-4svb7" podStartSLOduration=77.087123565 podStartE2EDuration="1m17.087123565s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:35.084087357 +0000 UTC m=+101.048270231" watchObservedRunningTime="2026-01-28 18:31:35.087123565 +0000 UTC m=+101.051306439" Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.099833 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-t9n45" podStartSLOduration=9.099815 podStartE2EDuration="9.099815s" podCreationTimestamp="2026-01-28 18:31:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:35.097857148 +0000 UTC m=+101.062040032" watchObservedRunningTime="2026-01-28 18:31:35.099815 +0000 UTC m=+101.063997874" Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.161898 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.162315 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:35.662284019 +0000 UTC m=+101.626466893 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.264130 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.264951 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:35.764928502 +0000 UTC m=+101.729111376 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.322828 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 18:31:35 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld Jan 28 18:31:35 crc kubenswrapper[4767]: [+]process-running ok Jan 28 18:31:35 crc kubenswrapper[4767]: healthz check failed Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.322910 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.365611 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.366047 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:35.866028086 +0000 UTC m=+101.830211030 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.466868 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.467049 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:35.967022616 +0000 UTC m=+101.931205490 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.467130 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.467453 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:35.967445 +0000 UTC m=+101.931627874 (durationBeforeRetry 500ms). 
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.568410 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.568589 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.068562094 +0000 UTC m=+102.032744968 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.568879 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.569177 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.069168754 +0000 UTC m=+102.033351628 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.670500 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.670723 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.170693041 +0000 UTC m=+102.134875925 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.670915 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.671304 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.17128799 +0000 UTC m=+102.135470874 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.772280 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.772471 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.272440706 +0000 UTC m=+102.236623580 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.772552 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.772909 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.27290153 +0000 UTC m=+102.237084404 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.837179 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" event={"ID":"67ab6cf3-b8b2-4adb-a126-b0aa842964e8","Type":"ContainerStarted","Data":"f799555c4cac27e8bc1c79b2c6257b4660a595a6af6271b541d345f5655c7ce0"}
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.838613 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-q6cnp" event={"ID":"b8db25ab-2803-4342-a0a3-c1bd5a44ba2e","Type":"ContainerStarted","Data":"67b34db78ffd259859e6cecc6598a992137b2eccbd98626a74b218fb1bf7e263"}
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.841578 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vsxcm" event={"ID":"921a23b8-84dd-478c-b1f8-3e9f812d79cb","Type":"ContainerStarted","Data":"55578e71d5b623162bad9ff333d1b87657111beebc8960188f2c29057c65741a"}
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.844697 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" event={"ID":"dbc51af2-c004-4115-a615-9d98b418c70f","Type":"ContainerStarted","Data":"16b882ceadaed140e6a938bf2d52dd14440d0a9a2b7c674ec9ad4442781dc89f"}
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.844730 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl"
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.845320 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.845355 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.845775 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp"
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.847375 4767 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-gxsvp container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body=
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.847433 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused"
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.852225 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-q6cnp" podStartSLOduration=77.852198686 podStartE2EDuration="1m17.852198686s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:35.851642859 +0000 UTC m=+101.815825733" watchObservedRunningTime="2026-01-28 18:31:35.852198686 +0000 UTC m=+101.816381560"
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.873148 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.873253 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.37323503 +0000 UTC m=+102.337417894 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.873376 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.873652 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.373644813 +0000 UTC m=+102.337827687 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.888576 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" podStartSLOduration=77.888554729 podStartE2EDuration="1m17.888554729s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:35.885909305 +0000 UTC m=+101.850092199" watchObservedRunningTime="2026-01-28 18:31:35.888554729 +0000 UTC m=+101.852737603"
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.889147 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" podStartSLOduration=77.889143098 podStartE2EDuration="1m17.889143098s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:35.870489252 +0000 UTC m=+101.834672126" watchObservedRunningTime="2026-01-28 18:31:35.889143098 +0000 UTC m=+101.853325972"
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.911001 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" podStartSLOduration=77.910985007 podStartE2EDuration="1m17.910985007s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:35.910655097 +0000 UTC m=+101.874838001" watchObservedRunningTime="2026-01-28 18:31:35.910985007 +0000 UTC m=+101.875167881"
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.930237 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" podStartSLOduration=77.930195572 podStartE2EDuration="1m17.930195572s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:35.926012308 +0000 UTC m=+101.890195182" watchObservedRunningTime="2026-01-28 18:31:35.930195572 +0000 UTC m=+101.894378456"
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.941281 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-rll48" podStartSLOduration=77.941263916 podStartE2EDuration="1m17.941263916s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:35.941035759 +0000 UTC m=+101.905218643" watchObservedRunningTime="2026-01-28 18:31:35.941263916 +0000 UTC m=+101.905446790"
Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.974832 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
"operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.974953 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.474934163 +0000 UTC m=+102.439117037 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:35 crc kubenswrapper[4767]: I0128 18:31:35.976265 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:35 crc kubenswrapper[4767]: E0128 18:31:35.977346 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.477325899 +0000 UTC m=+102.441508773 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.078535 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.079094 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.579052933 +0000 UTC m=+102.543235847 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.179993 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.180362 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.680345413 +0000 UTC m=+102.644528297 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.281385 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.281656 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.282839 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.782818011 +0000 UTC m=+102.747000885 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.287873 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0dc01d59-d401-4c7c-9eec-0a67aa5261fc-metrics-certs\") pod \"network-metrics-daemon-qbch4\" (UID: \"0dc01d59-d401-4c7c-9eec-0a67aa5261fc\") " pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.323021 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 18:31:36 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld Jan 28 18:31:36 crc kubenswrapper[4767]: [+]process-running ok Jan 28 18:31:36 crc kubenswrapper[4767]: healthz check failed Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.323093 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.382499 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.382842 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.88282977 +0000 UTC m=+102.847012644 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.483483 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.483677 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.983650135 +0000 UTC m=+102.947833009 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.484038 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.484341 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:36.984325647 +0000 UTC m=+102.948508521 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.559791 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-qbch4" Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.585430 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.585622 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:37.085595005 +0000 UTC m=+103.049777879 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.585745 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.586053 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:37.08604559 +0000 UTC m=+103.050228464 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.686611 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.687275 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:37.187259798 +0000 UTC m=+103.151442672 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.788299 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.788708 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:37.288692702 +0000 UTC m=+103.252875576 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.850968 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787" event={"ID":"cd5f7195-94fd-45d5-bf00-ceecdcb22ea0","Type":"ContainerStarted","Data":"e7e6fd8bf6a08eecdfade1f07077ff1a9f80fa69a80ed3c92577f0063d9cb40c"} Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.853105 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" event={"ID":"121787da-7b66-4cec-af46-774c1d87cc40","Type":"ContainerStarted","Data":"b8b30f094c86d1c51b259db1b01872fceb513803d9b2f78fd0835c226d923715"} Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.853187 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.854725 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n" event={"ID":"583ee7da-190f-4eae-813d-e195a64c236f","Type":"ContainerStarted","Data":"0bf57bf69863aaed2e20e21f4a853732777926ffbc2be2717677d967a34e211e"} Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.856514 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-kx6dk" event={"ID":"fd30d9e8-8c0c-4db4-8923-7a7a03e1df31","Type":"ContainerStarted","Data":"7bfddc19e9d207f209a9977760ca1557bfdf9194ac8986a46961e9c43e68b71f"} Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.862242 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-4tbt4" 
event={"ID":"595c726d-77f3-427f-a098-2301abc05d3b","Type":"ContainerStarted","Data":"a4c7363580c27048ae826b53d850956cf2738709ed0eb7dc9aa13a5302c5cb34"} Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.864547 4767 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-gxsvp container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" start-of-body= Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.864593 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.40:8080/healthz\": dial tcp 10.217.0.40:8080: connect: connection refused" Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.877440 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-v7787" podStartSLOduration=78.87742212 podStartE2EDuration="1m18.87742212s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:36.87583033 +0000 UTC m=+102.840013224" watchObservedRunningTime="2026-01-28 18:31:36.87742212 +0000 UTC m=+102.841604994" Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.889177 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.889435 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:37.389399064 +0000 UTC m=+103.353581938 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.889516 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.889805 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:37.389790966 +0000 UTC m=+103.353973840 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.905075 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-vsxcm" podStartSLOduration=78.905056714 podStartE2EDuration="1m18.905056714s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:36.894949961 +0000 UTC m=+102.859132835" watchObservedRunningTime="2026-01-28 18:31:36.905056714 +0000 UTC m=+102.869239658" Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.933697 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf" podStartSLOduration=78.93367767 podStartE2EDuration="1m18.93367767s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:36.930669024 +0000 UTC m=+102.894851898" watchObservedRunningTime="2026-01-28 18:31:36.93367767 +0000 UTC m=+102.897860544" Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.950512 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.958523 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-r4jlz" podStartSLOduration=78.958507684 podStartE2EDuration="1m18.958507684s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:36.956421618 +0000 UTC m=+102.920604492" watchObservedRunningTime="2026-01-28 18:31:36.958507684 +0000 UTC m=+102.922690558" Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.959244 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-svn7g" Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.984435 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-kx6dk" podStartSLOduration=78.984419153 podStartE2EDuration="1m18.984419153s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:36.981636774 +0000 UTC m=+102.945819658" watchObservedRunningTime="2026-01-28 18:31:36.984419153 +0000 UTC m=+102.948602027" Jan 28 18:31:36 crc kubenswrapper[4767]: I0128 18:31:36.990527 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:36 crc kubenswrapper[4767]: E0128 18:31:36.991782 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:37.491768578 +0000 UTC m=+103.455951452 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.005177 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-qbch4"] Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.013630 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-4tbt4" podStartSLOduration=11.013610097 podStartE2EDuration="11.013610097s" podCreationTimestamp="2026-01-28 18:31:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:37.012701568 +0000 UTC m=+102.976884442" watchObservedRunningTime="2026-01-28 18:31:37.013610097 +0000 UTC m=+102.977792971" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.094962 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.096724 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:37.596709695 +0000 UTC m=+103.560892569 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.196052 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.196278 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:37.696247038 +0000 UTC m=+103.660429922 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.196614 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.196929 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:37.6969172 +0000 UTC m=+103.661100074 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.297401 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.297717 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:37.797699254 +0000 UTC m=+103.761882128 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.353138 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 18:31:37 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld Jan 28 18:31:37 crc kubenswrapper[4767]: [+]process-running ok Jan 28 18:31:37 crc kubenswrapper[4767]: healthz check failed Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.353192 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.399032 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.399377 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:37.899361906 +0000 UTC m=+103.863544780 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.500288 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.500468 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.000441409 +0000 UTC m=+103.964624283 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.500519 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.500817 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.000803251 +0000 UTC m=+103.964986125 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.521126 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.521168 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.523117 4767 patch_prober.go:28] interesting pod/apiserver-76f77b778f-bmcrg container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="Get \"https://10.217.0.6:8443/livez\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.523157 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" podUID="0c90af26-b1fa-4faf-bbba-903dc47f7a46" containerName="openshift-apiserver" probeResult="failure" output="Get \"https://10.217.0.6:8443/livez\": dial tcp 10.217.0.6:8443: connect: connection refused" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.578385 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.579145 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.581443 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.582255 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.583176 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.601781 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.601934 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.101910224 +0000 UTC m=+104.066093098 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.602317 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.602722 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.10270683 +0000 UTC m=+104.066889704 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.703563 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.703792 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.203758252 +0000 UTC m=+104.167941136 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.703846 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2b5603a9-6e04-475d-99c3-9f9ff91ba6b5-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.703930 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.703970 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2b5603a9-6e04-475d-99c3-9f9ff91ba6b5-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.704299 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.20428958 +0000 UTC m=+104.168472534 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.805407 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.805607 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.30558193 +0000 UTC m=+104.269764814 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.805671 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.805713 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2b5603a9-6e04-475d-99c3-9f9ff91ba6b5-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.805778 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2b5603a9-6e04-475d-99c3-9f9ff91ba6b5-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.805863 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2b5603a9-6e04-475d-99c3-9f9ff91ba6b5-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.806057 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.306040585 +0000 UTC m=+104.270223459 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.835440 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2b5603a9-6e04-475d-99c3-9f9ff91ba6b5-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.872984 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qbch4" event={"ID":"0dc01d59-d401-4c7c-9eec-0a67aa5261fc","Type":"ContainerStarted","Data":"1e8f9e7cfc60ef9279adfe2e0874ffcd8244fe6b1786517bec539d2e6c21462e"} Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.873775 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-4tbt4" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.891604 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.907402 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.907602 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.407579112 +0000 UTC m=+104.371761986 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.907695 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:37 crc kubenswrapper[4767]: E0128 18:31:37.908192 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-01-28 18:31:38.408182901 +0000 UTC m=+104.372365775 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:37 crc kubenswrapper[4767]: I0128 18:31:37.943980 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-8st5n" podStartSLOduration=79.943956806 podStartE2EDuration="1m19.943956806s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:37.936336452 +0000 UTC m=+103.900519326" watchObservedRunningTime="2026-01-28 18:31:37.943956806 +0000 UTC m=+103.908139680" Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.009321 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.009550 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.509518673 +0000 UTC m=+104.473701547 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.009674 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.010188 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.510156394 +0000 UTC m=+104.474339458 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.110756 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.111046 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.61100252 +0000 UTC m=+104.575185394 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.111135 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.111464 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.611444593 +0000 UTC m=+104.575627457 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.211854 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.212574 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.712556467 +0000 UTC m=+104.676739341 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.313414 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.313718 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.813705203 +0000 UTC m=+104.777888077 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.322468 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 18:31:38 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld Jan 28 18:31:38 crc kubenswrapper[4767]: [+]process-running ok Jan 28 18:31:38 crc kubenswrapper[4767]: healthz check failed Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.322517 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.331367 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.415100 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.415248 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.91521751 +0000 UTC m=+104.879400394 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.415364 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.415718 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:38.915705626 +0000 UTC m=+104.879888500 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.516684 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.516889 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.016857332 +0000 UTC m=+104.981040196 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.517281 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.517677 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.017660257 +0000 UTC m=+104.981843131 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.613355 4767 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-n85dl container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body= Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.613947 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" podUID="dbc51af2-c004-4115-a615-9d98b418c70f" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.613375 4767 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-n85dl container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body= Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.614046 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" podUID="dbc51af2-c004-4115-a615-9d98b418c70f" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.26:8443/healthz\": dial tcp 10.217.0.26:8443: connect: connection refused" Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.618061 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.618290 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.118254604 +0000 UTC m=+105.082437478 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.618572 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.618974 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.118959557 +0000 UTC m=+105.083142431 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.719668 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.719873 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.219832274 +0000 UTC m=+105.184015148 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.720031 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.720422 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.220406302 +0000 UTC m=+105.184589176 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.822082 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.822300 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.322268851 +0000 UTC m=+105.286451725 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.822529 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.822922 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.322913581 +0000 UTC m=+105.287096455 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.879358 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" event={"ID":"ecf41ec8-6c32-4b5c-a157-f52cc4de995a","Type":"ContainerStarted","Data":"71a4def3d3fc4f887ad6ca9cbea5e13363b966c309ecd3e716084712cf3d1beb"} Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.880855 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qbch4" event={"ID":"0dc01d59-d401-4c7c-9eec-0a67aa5261fc","Type":"ContainerStarted","Data":"f2110422b47eeedac84f32eed658b22be128a12ee19e8dbd216550a19d469518"} Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.881906 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5","Type":"ContainerStarted","Data":"48e511b50e66a9b6375a71aeb6794e17c59eda4ef1aa027fa14c6c6d51f2f164"} Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.923450 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.923747 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.423704725 +0000 UTC m=+105.387887599 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:38 crc kubenswrapper[4767]: I0128 18:31:38.924003 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:38 crc kubenswrapper[4767]: E0128 18:31:38.924431 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.424413398 +0000 UTC m=+105.388596312 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.025310 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.025481 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.52545127 +0000 UTC m=+105.489634144 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.025752 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.026195 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.526177383 +0000 UTC m=+105.490360257 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.081586 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.126509 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.126844 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.626823713 +0000 UTC m=+105.591006587 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.154885 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.209274 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-qfdzz"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.209711 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-qfdzz"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.212437 4767 patch_prober.go:28] interesting pod/console-f9d7485db-qfdzz container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.212501 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-qfdzz" podUID="1ac82155-1d09-4371-869a-e7edb9c4d5bc" containerName="console" probeResult="failure" output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.228084 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.229949 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.729892259 +0000 UTC m=+105.694075353 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.248105 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-b7z9l"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.319062 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-ttsxj"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.323998 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 18:31:39 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld
Jan 28 18:31:39 crc kubenswrapper[4767]: [+]process-running ok
Jan 28 18:31:39 crc kubenswrapper[4767]: healthz check failed
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.324070 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.328773 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.328937 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.828910137 +0000 UTC m=+105.793093021 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.329025 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.330403 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.830389634 +0000 UTC m=+105.794572508 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.373577 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.419152 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-lslnr"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.429847 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.430024 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.929993729 +0000 UTC m=+105.894176613 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.430073 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.431234 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:39.931220599 +0000 UTC m=+105.895403473 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.442598 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.455780 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.499944 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-gft5f"]
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.501006 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gft5f"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.508650 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.533693 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.534343 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gft5f"]
Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.535259 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:40.035227496 +0000 UTC m=+105.999410370 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.613660 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.613722 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.619825 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.619879 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.635704 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tx7zn\" (UniqueName: \"kubernetes.io/projected/daa183e4-f49d-4f7d-9f9b-66e42f869297-kube-api-access-tx7zn\") pod \"certified-operators-gft5f\" (UID: \"daa183e4-f49d-4f7d-9f9b-66e42f869297\") " pod="openshift-marketplace/certified-operators-gft5f"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.635801 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.635919 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daa183e4-f49d-4f7d-9f9b-66e42f869297-catalog-content\") pod \"certified-operators-gft5f\" (UID: \"daa183e4-f49d-4f7d-9f9b-66e42f869297\") " pod="openshift-marketplace/certified-operators-gft5f"
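Annotation: every CSI failure in this burst has the same root cause — the lookup for kubevirt.io.hostpath-provisioner fails because the driver has not (yet) registered with this kubelet, so both Unmounter.TearDownAt and attacher.MountDevice are rejected before any RPC is attempted. The following is a minimal, illustrative Go sketch of a registry lookup of this shape; it is not the kubelet source, and the csiDriversStore name and map layout are assumptions made for the example.

    package main

    import (
    	"fmt"
    	"sync"
    )

    // csiDriversStore stands in for the kubelet's in-memory set of CSI
    // drivers that have completed plugin registration (assumption: a plain
    // name -> endpoint map guarded by an RWMutex is enough for the sketch).
    type csiDriversStore struct {
    	mu      sync.RWMutex
    	drivers map[string]string
    }

    // clientFor returns the endpoint for a registered driver, or an error
    // matching the text seen throughout the records above.
    func (s *csiDriversStore) clientFor(name string) (string, error) {
    	s.mu.RLock()
    	defer s.mu.RUnlock()
    	if ep, ok := s.drivers[name]; ok {
    		return ep, nil
    	}
    	return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
    }

    func main() {
    	reg := &csiDriversStore{drivers: map[string]string{}} // nothing registered yet
    	if _, err := reg.clientFor("kubevirt.io.hostpath-provisioner"); err != nil {
    		fmt.Println("kubernetes.io/csi:", err)
    	}
    }

The condition clears on its own once the driver's node plugin comes up and registers over the kubelet plugin-registration socket; until then every retry below repeats the same lookup failure.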
(UID: \"daa183e4-f49d-4f7d-9f9b-66e42f869297\") " pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.636234 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:40.136183905 +0000 UTC m=+106.100366849 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.687617 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4lhmz"] Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.688564 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.690262 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.695871 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.702664 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-472lc" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.708398 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4lhmz"] Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.736939 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.737072 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tx7zn\" (UniqueName: \"kubernetes.io/projected/daa183e4-f49d-4f7d-9f9b-66e42f869297-kube-api-access-tx7zn\") pod \"certified-operators-gft5f\" (UID: \"daa183e4-f49d-4f7d-9f9b-66e42f869297\") " pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.737105 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:40.237078893 +0000 UTC m=+106.201261767 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.737182 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.737278 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daa183e4-f49d-4f7d-9f9b-66e42f869297-catalog-content\") pod \"certified-operators-gft5f\" (UID: \"daa183e4-f49d-4f7d-9f9b-66e42f869297\") " pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.737298 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daa183e4-f49d-4f7d-9f9b-66e42f869297-utilities\") pod \"certified-operators-gft5f\" (UID: \"daa183e4-f49d-4f7d-9f9b-66e42f869297\") " pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.737451 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:40.237437644 +0000 UTC m=+106.201620518 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.737673 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daa183e4-f49d-4f7d-9f9b-66e42f869297-catalog-content\") pod \"certified-operators-gft5f\" (UID: \"daa183e4-f49d-4f7d-9f9b-66e42f869297\") " pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.737742 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daa183e4-f49d-4f7d-9f9b-66e42f869297-utilities\") pod \"certified-operators-gft5f\" (UID: \"daa183e4-f49d-4f7d-9f9b-66e42f869297\") " pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.765380 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tx7zn\" (UniqueName: \"kubernetes.io/projected/daa183e4-f49d-4f7d-9f9b-66e42f869297-kube-api-access-tx7zn\") pod \"certified-operators-gft5f\" (UID: \"daa183e4-f49d-4f7d-9f9b-66e42f869297\") " pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.830019 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.838277 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.838499 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkq77\" (UniqueName: \"kubernetes.io/projected/8d36dd2c-a6e9-4369-aae5-c657695233a5-kube-api-access-nkq77\") pod \"community-operators-4lhmz\" (UID: \"8d36dd2c-a6e9-4369-aae5-c657695233a5\") " pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.838527 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d36dd2c-a6e9-4369-aae5-c657695233a5-utilities\") pod \"community-operators-4lhmz\" (UID: \"8d36dd2c-a6e9-4369-aae5-c657695233a5\") " pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.838602 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d36dd2c-a6e9-4369-aae5-c657695233a5-catalog-content\") pod \"community-operators-4lhmz\" (UID: \"8d36dd2c-a6e9-4369-aae5-c657695233a5\") " pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.838697 4767 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:40.338683482 +0000 UTC m=+106.302866346 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.883903 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-86rk5"] Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.885096 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.897695 4767 generic.go:334] "Generic (PLEG): container finished" podID="bcde5fe1-42de-449a-9743-2f313a33659a" containerID="687db6835b47ef59bf0a8c5a29178c8469635231464e234ae28e76c219f66f33" exitCode=0 Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.897789 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" event={"ID":"bcde5fe1-42de-449a-9743-2f313a33659a","Type":"ContainerDied","Data":"687db6835b47ef59bf0a8c5a29178c8469635231464e234ae28e76c219f66f33"} Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.924407 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-86rk5"] Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.928151 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5","Type":"ContainerStarted","Data":"7dd1bc3defd9ae52eb30144049cb2b1fb4e414787e323d8ff2e3f5c9f2dca2a7"} Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.939814 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkq77\" (UniqueName: \"kubernetes.io/projected/8d36dd2c-a6e9-4369-aae5-c657695233a5-kube-api-access-nkq77\") pod \"community-operators-4lhmz\" (UID: \"8d36dd2c-a6e9-4369-aae5-c657695233a5\") " pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.939861 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d36dd2c-a6e9-4369-aae5-c657695233a5-utilities\") pod \"community-operators-4lhmz\" (UID: \"8d36dd2c-a6e9-4369-aae5-c657695233a5\") " pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.939920 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d36dd2c-a6e9-4369-aae5-c657695233a5-catalog-content\") pod \"community-operators-4lhmz\" (UID: \"8d36dd2c-a6e9-4369-aae5-c657695233a5\") " pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.939964 4767 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:39 crc kubenswrapper[4767]: E0128 18:31:39.940325 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:40.440306293 +0000 UTC m=+106.404489157 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.940926 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d36dd2c-a6e9-4369-aae5-c657695233a5-utilities\") pod \"community-operators-4lhmz\" (UID: \"8d36dd2c-a6e9-4369-aae5-c657695233a5\") " pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.946776 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d36dd2c-a6e9-4369-aae5-c657695233a5-catalog-content\") pod \"community-operators-4lhmz\" (UID: \"8d36dd2c-a6e9-4369-aae5-c657695233a5\") " pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:31:39 crc kubenswrapper[4767]: I0128 18:31:39.975773 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkq77\" (UniqueName: \"kubernetes.io/projected/8d36dd2c-a6e9-4369-aae5-c657695233a5-kube-api-access-nkq77\") pod \"community-operators-4lhmz\" (UID: \"8d36dd2c-a6e9-4369-aae5-c657695233a5\") " pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.004578 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.040823 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.041097 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c91bd017-c929-4891-9118-95e20ef61238-catalog-content\") pod \"certified-operators-86rk5\" (UID: \"c91bd017-c929-4891-9118-95e20ef61238\") " pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.041175 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjgbt\" (UniqueName: \"kubernetes.io/projected/c91bd017-c929-4891-9118-95e20ef61238-kube-api-access-bjgbt\") pod \"certified-operators-86rk5\" (UID: \"c91bd017-c929-4891-9118-95e20ef61238\") " pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.041199 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c91bd017-c929-4891-9118-95e20ef61238-utilities\") pod \"certified-operators-86rk5\" (UID: \"c91bd017-c929-4891-9118-95e20ef61238\") " pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:31:40 crc kubenswrapper[4767]: E0128 18:31:40.042335 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:40.542312226 +0000 UTC m=+106.506495100 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.088816 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.117847 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.118398 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.121454 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6w58h"] Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.123239 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.123531 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.123602 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.129320 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.144091 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.144128 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c91bd017-c929-4891-9118-95e20ef61238-catalog-content\") pod \"certified-operators-86rk5\" (UID: \"c91bd017-c929-4891-9118-95e20ef61238\") " pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.144168 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjgbt\" (UniqueName: \"kubernetes.io/projected/c91bd017-c929-4891-9118-95e20ef61238-kube-api-access-bjgbt\") pod \"certified-operators-86rk5\" (UID: \"c91bd017-c929-4891-9118-95e20ef61238\") " pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.144193 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c91bd017-c929-4891-9118-95e20ef61238-utilities\") pod \"certified-operators-86rk5\" (UID: \"c91bd017-c929-4891-9118-95e20ef61238\") " pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.144644 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c91bd017-c929-4891-9118-95e20ef61238-catalog-content\") pod \"certified-operators-86rk5\" (UID: \"c91bd017-c929-4891-9118-95e20ef61238\") " pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.144852 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c91bd017-c929-4891-9118-95e20ef61238-utilities\") pod \"certified-operators-86rk5\" (UID: \"c91bd017-c929-4891-9118-95e20ef61238\") " pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:31:40 crc kubenswrapper[4767]: E0128 18:31:40.144932 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:40.644916888 +0000 UTC m=+106.609099832 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.168609 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6w58h"] Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.205060 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.230611 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjgbt\" (UniqueName: \"kubernetes.io/projected/c91bd017-c929-4891-9118-95e20ef61238-kube-api-access-bjgbt\") pod \"certified-operators-86rk5\" (UID: \"c91bd017-c929-4891-9118-95e20ef61238\") " pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.249070 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.249303 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0ee47b3-df01-4b36-8d19-ead3db6a705d-utilities\") pod \"community-operators-6w58h\" (UID: \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\") " pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.249336 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8v5hl\" (UniqueName: \"kubernetes.io/projected/b0ee47b3-df01-4b36-8d19-ead3db6a705d-kube-api-access-8v5hl\") pod \"community-operators-6w58h\" (UID: \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\") " pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.249376 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0ee47b3-df01-4b36-8d19-ead3db6a705d-catalog-content\") pod \"community-operators-6w58h\" (UID: \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\") " pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.249429 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/848cd268-6278-49a5-9ac9-3e18122e6d5c-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"848cd268-6278-49a5-9ac9-3e18122e6d5c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.249458 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: 
\"kubernetes.io/host-path/848cd268-6278-49a5-9ac9-3e18122e6d5c-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"848cd268-6278-49a5-9ac9-3e18122e6d5c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 18:31:40 crc kubenswrapper[4767]: E0128 18:31:40.249554 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:40.749538165 +0000 UTC m=+106.713721039 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.299149 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-gft5f"] Jan 28 18:31:40 crc kubenswrapper[4767]: W0128 18:31:40.322402 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddaa183e4_f49d_4f7d_9f9b_66e42f869297.slice/crio-a9f1dc63e36e9c185de8451a5fbbf9c0053998a81ad5fa53f7ab6e0144f2568b WatchSource:0}: Error finding container a9f1dc63e36e9c185de8451a5fbbf9c0053998a81ad5fa53f7ab6e0144f2568b: Status 404 returned error can't find the container with id a9f1dc63e36e9c185de8451a5fbbf9c0053998a81ad5fa53f7ab6e0144f2568b Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.330146 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 18:31:40 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld Jan 28 18:31:40 crc kubenswrapper[4767]: [+]process-running ok Jan 28 18:31:40 crc kubenswrapper[4767]: healthz check failed Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.330258 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.350643 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/848cd268-6278-49a5-9ac9-3e18122e6d5c-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"848cd268-6278-49a5-9ac9-3e18122e6d5c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.350699 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/848cd268-6278-49a5-9ac9-3e18122e6d5c-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"848cd268-6278-49a5-9ac9-3e18122e6d5c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.350744 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/b0ee47b3-df01-4b36-8d19-ead3db6a705d-utilities\") pod \"community-operators-6w58h\" (UID: \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\") " pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.350767 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.350783 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8v5hl\" (UniqueName: \"kubernetes.io/projected/b0ee47b3-df01-4b36-8d19-ead3db6a705d-kube-api-access-8v5hl\") pod \"community-operators-6w58h\" (UID: \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\") " pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.350820 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0ee47b3-df01-4b36-8d19-ead3db6a705d-catalog-content\") pod \"community-operators-6w58h\" (UID: \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\") " pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.351733 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0ee47b3-df01-4b36-8d19-ead3db6a705d-utilities\") pod \"community-operators-6w58h\" (UID: \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\") " pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:31:40 crc kubenswrapper[4767]: E0128 18:31:40.351996 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:40.851981391 +0000 UTC m=+106.816164315 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.352083 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/848cd268-6278-49a5-9ac9-3e18122e6d5c-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"848cd268-6278-49a5-9ac9-3e18122e6d5c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.352363 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0ee47b3-df01-4b36-8d19-ead3db6a705d-catalog-content\") pod \"community-operators-6w58h\" (UID: \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\") " pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.389374 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/848cd268-6278-49a5-9ac9-3e18122e6d5c-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"848cd268-6278-49a5-9ac9-3e18122e6d5c\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.391248 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8v5hl\" (UniqueName: \"kubernetes.io/projected/b0ee47b3-df01-4b36-8d19-ead3db6a705d-kube-api-access-8v5hl\") pod \"community-operators-6w58h\" (UID: \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\") " pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.451965 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:40 crc kubenswrapper[4767]: E0128 18:31:40.452493 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:40.952473556 +0000 UTC m=+106.916656430 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.458846 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.486078 4767 util.go:30] "No sandbox for pod can be found. 
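Annotation: the router and console records in this window show the two common probe-failure shapes — a TCP "connection refused" while the server is not listening yet, and an HTTP 500 whose start-of-body carries the healthz detail lines ("[-]backend-http failed ..."). Below is a sketch of the underlying HTTP check with illustrative names; it is not kubelet's prober API, but it follows the same success rule (status 200-399) and the same truncated start-of-body reporting seen above.

    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    	"time"
    )

    // probeHTTP performs one HTTP probe attempt. A refused connection or a
    // status outside 200-399 counts as failure, mirroring the log records.
    func probeHTTP(url string) (success bool, output string) {
    	client := &http.Client{Timeout: 1 * time.Second}
    	resp, err := client.Get(url)
    	if err != nil {
    		// e.g. Get "http://10.217.0.15:8080/": dial tcp ...: connect: connection refused
    		return false, fmt.Sprintf("Get %q: %v", url, err)
    	}
    	defer resp.Body.Close()
    	// Keep only the start of the body, as the prober's output does.
    	body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
    	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
    		return false, fmt.Sprintf("HTTP probe failed with statuscode: %d\n%s", resp.StatusCode, body)
    	}
    	return true, string(body)
    }

    func main() {
    	// Endpoint taken from the downloads pod records above; outside that
    	// cluster this simply demonstrates the failure path.
    	ok, out := probeHTTP("http://10.217.0.15:8080/")
    	fmt.Println(ok, out)
    }

For a startup probe, repeated failures like these keep the container in its startup grace period; the kubelet only switches to liveness/readiness probing once one startup attempt succeeds.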
Need to start a new one" pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.498500 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4lhmz"] Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.508577 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.557561 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:40 crc kubenswrapper[4767]: E0128 18:31:40.558101 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:41.058081674 +0000 UTC m=+107.022264548 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.660631 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:40 crc kubenswrapper[4767]: E0128 18:31:40.661053 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:41.161037617 +0000 UTC m=+107.125220491 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.789833 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:40 crc kubenswrapper[4767]: E0128 18:31:40.790345 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:41.290328673 +0000 UTC m=+107.254511547 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.891234 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:40 crc kubenswrapper[4767]: E0128 18:31:40.892818 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:41.39277005 +0000 UTC m=+107.356952924 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.894485 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:40 crc kubenswrapper[4767]: E0128 18:31:40.895743 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:41.39559845 +0000 UTC m=+107.359781504 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.935302 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-86rk5"] Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.959335 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gft5f" event={"ID":"daa183e4-f49d-4f7d-9f9b-66e42f869297","Type":"ContainerStarted","Data":"85d8324ce62e23eb0ed1790e5a1d65bb4ed5c7276d129edf664cdf2fd7a36ed0"} Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.959380 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gft5f" event={"ID":"daa183e4-f49d-4f7d-9f9b-66e42f869297","Type":"ContainerStarted","Data":"a9f1dc63e36e9c185de8451a5fbbf9c0053998a81ad5fa53f7ab6e0144f2568b"} Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.995674 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:40 crc kubenswrapper[4767]: E0128 18:31:40.996055 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:41.496038533 +0000 UTC m=+107.460221407 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:40 crc kubenswrapper[4767]: I0128 18:31:40.997840 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-qbch4" event={"ID":"0dc01d59-d401-4c7c-9eec-0a67aa5261fc","Type":"ContainerStarted","Data":"2826e638ca8c0e4c54f841c769e39dfafbb093761222700d9d72fea988eabee6"} Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.005895 4767 generic.go:334] "Generic (PLEG): container finished" podID="2b5603a9-6e04-475d-99c3-9f9ff91ba6b5" containerID="7dd1bc3defd9ae52eb30144049cb2b1fb4e414787e323d8ff2e3f5c9f2dca2a7" exitCode=0 Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.005968 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5","Type":"ContainerDied","Data":"7dd1bc3defd9ae52eb30144049cb2b1fb4e414787e323d8ff2e3f5c9f2dca2a7"} Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.007881 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lhmz" event={"ID":"8d36dd2c-a6e9-4369-aae5-c657695233a5","Type":"ContainerStarted","Data":"041f357abbc780a02f0e4641f869fc961dbb7197979b7ce3a58f1cf5d4996ad9"} Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.097287 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-qbch4" podStartSLOduration=83.097048974 podStartE2EDuration="1m23.097048974s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:41.038922885 +0000 UTC m=+107.003105769" watchObservedRunningTime="2026-01-28 18:31:41.097048974 +0000 UTC m=+107.061231848" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.098086 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.098511 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:41.59849409 +0000 UTC m=+107.562676964 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.141406 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.195718 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-pmb2t" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.199248 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.199608 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:41.699587633 +0000 UTC m=+107.663770507 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.272335 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6w58h"] Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.303379 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.304867 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:41.804853141 +0000 UTC m=+107.769036015 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.328640 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 18:31:41 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld Jan 28 18:31:41 crc kubenswrapper[4767]: [+]process-running ok Jan 28 18:31:41 crc kubenswrapper[4767]: healthz check failed Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.328713 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.415867 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.416980 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:41.916953507 +0000 UTC m=+107.881136381 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.417076 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.417438 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:41.917431212 +0000 UTC m=+107.881614076 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.498967 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qcqfz"] Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.500731 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.502046 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.504683 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.521705 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.521732 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.021711277 +0000 UTC m=+107.985894141 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.522295 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5g7k\" (UniqueName: \"kubernetes.io/projected/eff110e4-7a33-4782-86e1-efff7c646e6f-kube-api-access-h5g7k\") pod \"redhat-marketplace-qcqfz\" (UID: \"eff110e4-7a33-4782-86e1-efff7c646e6f\") " pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.522402 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.522485 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eff110e4-7a33-4782-86e1-efff7c646e6f-utilities\") pod \"redhat-marketplace-qcqfz\" (UID: \"eff110e4-7a33-4782-86e1-efff7c646e6f\") " pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.522508 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eff110e4-7a33-4782-86e1-efff7c646e6f-catalog-content\") pod \"redhat-marketplace-qcqfz\" (UID: \"eff110e4-7a33-4782-86e1-efff7c646e6f\") " pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.523471 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.023456454 +0000 UTC m=+107.987639418 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.542926 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qcqfz"] Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.575575 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.623526 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.623589 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bcde5fe1-42de-449a-9743-2f313a33659a-config-volume\") pod \"bcde5fe1-42de-449a-9743-2f313a33659a\" (UID: \"bcde5fe1-42de-449a-9743-2f313a33659a\") " Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.623612 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzl57\" (UniqueName: \"kubernetes.io/projected/bcde5fe1-42de-449a-9743-2f313a33659a-kube-api-access-dzl57\") pod \"bcde5fe1-42de-449a-9743-2f313a33659a\" (UID: \"bcde5fe1-42de-449a-9743-2f313a33659a\") " Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.623652 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bcde5fe1-42de-449a-9743-2f313a33659a-secret-volume\") pod \"bcde5fe1-42de-449a-9743-2f313a33659a\" (UID: \"bcde5fe1-42de-449a-9743-2f313a33659a\") " Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.623748 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5g7k\" (UniqueName: \"kubernetes.io/projected/eff110e4-7a33-4782-86e1-efff7c646e6f-kube-api-access-h5g7k\") pod \"redhat-marketplace-qcqfz\" (UID: \"eff110e4-7a33-4782-86e1-efff7c646e6f\") " pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.623846 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eff110e4-7a33-4782-86e1-efff7c646e6f-utilities\") pod \"redhat-marketplace-qcqfz\" (UID: \"eff110e4-7a33-4782-86e1-efff7c646e6f\") " pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.623866 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eff110e4-7a33-4782-86e1-efff7c646e6f-catalog-content\") pod \"redhat-marketplace-qcqfz\" (UID: \"eff110e4-7a33-4782-86e1-efff7c646e6f\") " pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.624231 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.124158395 +0000 UTC m=+108.088341269 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.624357 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eff110e4-7a33-4782-86e1-efff7c646e6f-catalog-content\") pod \"redhat-marketplace-qcqfz\" (UID: \"eff110e4-7a33-4782-86e1-efff7c646e6f\") " pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.624612 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eff110e4-7a33-4782-86e1-efff7c646e6f-utilities\") pod \"redhat-marketplace-qcqfz\" (UID: \"eff110e4-7a33-4782-86e1-efff7c646e6f\") " pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.624937 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bcde5fe1-42de-449a-9743-2f313a33659a-config-volume" (OuterVolumeSpecName: "config-volume") pod "bcde5fe1-42de-449a-9743-2f313a33659a" (UID: "bcde5fe1-42de-449a-9743-2f313a33659a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.630924 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bcde5fe1-42de-449a-9743-2f313a33659a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "bcde5fe1-42de-449a-9743-2f313a33659a" (UID: "bcde5fe1-42de-449a-9743-2f313a33659a"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.641862 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bcde5fe1-42de-449a-9743-2f313a33659a-kube-api-access-dzl57" (OuterVolumeSpecName: "kube-api-access-dzl57") pod "bcde5fe1-42de-449a-9743-2f313a33659a" (UID: "bcde5fe1-42de-449a-9743-2f313a33659a"). InnerVolumeSpecName "kube-api-access-dzl57". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.658246 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5g7k\" (UniqueName: \"kubernetes.io/projected/eff110e4-7a33-4782-86e1-efff7c646e6f-kube-api-access-h5g7k\") pod \"redhat-marketplace-qcqfz\" (UID: \"eff110e4-7a33-4782-86e1-efff7c646e6f\") " pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.726021 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.726150 4767 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/bcde5fe1-42de-449a-9743-2f313a33659a-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.726162 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzl57\" (UniqueName: \"kubernetes.io/projected/bcde5fe1-42de-449a-9743-2f313a33659a-kube-api-access-dzl57\") on node \"crc\" DevicePath \"\"" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.726173 4767 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/bcde5fe1-42de-449a-9743-2f313a33659a-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.726499 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.226484537 +0000 UTC m=+108.190667411 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.827738 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.827934 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.327916282 +0000 UTC m=+108.292099156 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.828313 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.828800 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.32877483 +0000 UTC m=+108.292957704 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.846639 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.883330 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-bfc9c"] Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.883959 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bcde5fe1-42de-449a-9743-2f313a33659a" containerName="collect-profiles" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.883972 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="bcde5fe1-42de-449a-9743-2f313a33659a" containerName="collect-profiles" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.884073 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="bcde5fe1-42de-449a-9743-2f313a33659a" containerName="collect-profiles" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.884823 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.910977 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfc9c"] Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.929025 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.929234 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e90df07-d5ed-42b7-a3d8-de62235b551d-catalog-content\") pod \"redhat-marketplace-bfc9c\" (UID: \"2e90df07-d5ed-42b7-a3d8-de62235b551d\") " pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.929273 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nttm\" (UniqueName: \"kubernetes.io/projected/2e90df07-d5ed-42b7-a3d8-de62235b551d-kube-api-access-6nttm\") pod \"redhat-marketplace-bfc9c\" (UID: \"2e90df07-d5ed-42b7-a3d8-de62235b551d\") " pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:31:41 crc kubenswrapper[4767]: I0128 18:31:41.929311 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e90df07-d5ed-42b7-a3d8-de62235b551d-utilities\") pod \"redhat-marketplace-bfc9c\" (UID: \"2e90df07-d5ed-42b7-a3d8-de62235b551d\") " pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:31:41 crc kubenswrapper[4767]: E0128 18:31:41.929435 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.429413749 +0000 UTC m=+108.393596633 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.033628 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e90df07-d5ed-42b7-a3d8-de62235b551d-catalog-content\") pod \"redhat-marketplace-bfc9c\" (UID: \"2e90df07-d5ed-42b7-a3d8-de62235b551d\") " pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.033726 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nttm\" (UniqueName: \"kubernetes.io/projected/2e90df07-d5ed-42b7-a3d8-de62235b551d-kube-api-access-6nttm\") pod \"redhat-marketplace-bfc9c\" (UID: \"2e90df07-d5ed-42b7-a3d8-de62235b551d\") " pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.033810 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e90df07-d5ed-42b7-a3d8-de62235b551d-utilities\") pod \"redhat-marketplace-bfc9c\" (UID: \"2e90df07-d5ed-42b7-a3d8-de62235b551d\") " pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.033861 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.034433 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.534414907 +0000 UTC m=+108.498597781 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.035339 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e90df07-d5ed-42b7-a3d8-de62235b551d-catalog-content\") pod \"redhat-marketplace-bfc9c\" (UID: \"2e90df07-d5ed-42b7-a3d8-de62235b551d\") " pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.036115 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e90df07-d5ed-42b7-a3d8-de62235b551d-utilities\") pod \"redhat-marketplace-bfc9c\" (UID: \"2e90df07-d5ed-42b7-a3d8-de62235b551d\") " pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.048135 4767 generic.go:334] "Generic (PLEG): container finished" podID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" containerID="dcb587a4bb8875b8cf84c6e4c6f5fbd5d6895f2961c122f737bfa8a637ac7123" exitCode=0 Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.048287 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6w58h" event={"ID":"b0ee47b3-df01-4b36-8d19-ead3db6a705d","Type":"ContainerDied","Data":"dcb587a4bb8875b8cf84c6e4c6f5fbd5d6895f2961c122f737bfa8a637ac7123"} Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.048323 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6w58h" event={"ID":"b0ee47b3-df01-4b36-8d19-ead3db6a705d","Type":"ContainerStarted","Data":"b792360462326394fe8d0642cb85ffce6910ede30f7bd7d00e819806a768b842"} Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.054005 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.058328 4767 generic.go:334] "Generic (PLEG): container finished" podID="c91bd017-c929-4891-9118-95e20ef61238" containerID="bdcf1dc2f963e3f638994768d6d49570f12d8b742bd776a486a486861dfdc750" exitCode=0 Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.058503 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86rk5" event={"ID":"c91bd017-c929-4891-9118-95e20ef61238","Type":"ContainerDied","Data":"bdcf1dc2f963e3f638994768d6d49570f12d8b742bd776a486a486861dfdc750"} Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.058538 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86rk5" event={"ID":"c91bd017-c929-4891-9118-95e20ef61238","Type":"ContainerStarted","Data":"0894a500d8f40b74ad7f4c66a10b7ec12104b31693e91fab84926d26dc707785"} Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.072392 4767 generic.go:334] "Generic (PLEG): container finished" podID="daa183e4-f49d-4f7d-9f9b-66e42f869297" containerID="85d8324ce62e23eb0ed1790e5a1d65bb4ed5c7276d129edf664cdf2fd7a36ed0" exitCode=0 Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.072509 4767 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/certified-operators-gft5f" event={"ID":"daa183e4-f49d-4f7d-9f9b-66e42f869297","Type":"ContainerDied","Data":"85d8324ce62e23eb0ed1790e5a1d65bb4ed5c7276d129edf664cdf2fd7a36ed0"} Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.088671 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"848cd268-6278-49a5-9ac9-3e18122e6d5c","Type":"ContainerStarted","Data":"108ec89d5b17d73c369151e46439767e23d862cf52bebcc6bbfc373ea3bcd66f"} Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.088732 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"848cd268-6278-49a5-9ac9-3e18122e6d5c","Type":"ContainerStarted","Data":"6b02a564a36797dcde76b63a1b574a6c18c827037295439d6b977ac364112de2"} Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.105476 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nttm\" (UniqueName: \"kubernetes.io/projected/2e90df07-d5ed-42b7-a3d8-de62235b551d-kube-api-access-6nttm\") pod \"redhat-marketplace-bfc9c\" (UID: \"2e90df07-d5ed-42b7-a3d8-de62235b551d\") " pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.117777 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" event={"ID":"bcde5fe1-42de-449a-9743-2f313a33659a","Type":"ContainerDied","Data":"1e20de379fc231bd77b7642446a4a8bfca561e2a6f19127b14f90a36af0f95dd"} Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.117849 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e20de379fc231bd77b7642446a4a8bfca561e2a6f19127b14f90a36af0f95dd" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.117947 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.140448 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.140977 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.640948995 +0000 UTC m=+108.605131879 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.141371 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.143126 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.643108404 +0000 UTC m=+108.607291278 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.144564 4767 generic.go:334] "Generic (PLEG): container finished" podID="8d36dd2c-a6e9-4369-aae5-c657695233a5" containerID="13dfcd72212b62ec61c83184aa94965f05cfeff96d2690fdc622ee1bf63c2289" exitCode=0 Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.144802 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lhmz" event={"ID":"8d36dd2c-a6e9-4369-aae5-c657695233a5","Type":"ContainerDied","Data":"13dfcd72212b62ec61c83184aa94965f05cfeff96d2690fdc622ee1bf63c2289"} Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.227872 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.243562 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.244010 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.7439666 +0000 UTC m=+108.708149484 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.245331 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.245843 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.74582545 +0000 UTC m=+108.710008324 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.346527 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.349483 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.849444735 +0000 UTC m=+108.813627609 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.352816 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 28 18:31:42 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld Jan 28 18:31:42 crc kubenswrapper[4767]: [+]process-running ok Jan 28 18:31:42 crc kubenswrapper[4767]: healthz check failed Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.352867 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.448213 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.448627 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:42.948611626 +0000 UTC m=+108.912794500 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.509880 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-n85dl" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.539349 4767 patch_prober.go:28] interesting pod/apiserver-76f77b778f-bmcrg container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Jan 28 18:31:42 crc kubenswrapper[4767]: [+]log ok Jan 28 18:31:42 crc kubenswrapper[4767]: [+]etcd ok Jan 28 18:31:42 crc kubenswrapper[4767]: [+]poststarthook/start-apiserver-admission-initializer ok Jan 28 18:31:42 crc kubenswrapper[4767]: [+]poststarthook/generic-apiserver-start-informers ok Jan 28 18:31:42 crc kubenswrapper[4767]: [+]poststarthook/max-in-flight-filter ok Jan 28 18:31:42 crc kubenswrapper[4767]: [+]poststarthook/storage-object-count-tracker-hook ok Jan 28 18:31:42 crc kubenswrapper[4767]: [+]poststarthook/image.openshift.io-apiserver-caches ok Jan 28 18:31:42 crc kubenswrapper[4767]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Jan 28 18:31:42 crc kubenswrapper[4767]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Jan 28 18:31:42 crc kubenswrapper[4767]: [+]poststarthook/project.openshift.io-projectcache ok Jan 28 18:31:42 crc kubenswrapper[4767]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Jan 28 18:31:42 crc kubenswrapper[4767]: [+]poststarthook/openshift.io-startinformers ok Jan 28 18:31:42 crc kubenswrapper[4767]: [+]poststarthook/openshift.io-restmapperupdater ok Jan 28 18:31:42 crc kubenswrapper[4767]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Jan 28 18:31:42 crc kubenswrapper[4767]: livez check failed Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.539413 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg" podUID="0c90af26-b1fa-4faf-bbba-903dc47f7a46" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.546043 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.546028212 podStartE2EDuration="2.546028212s" podCreationTimestamp="2026-01-28 18:31:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:42.282350788 +0000 UTC m=+108.246533662" watchObservedRunningTime="2026-01-28 18:31:42.546028212 +0000 UTC m=+108.510211086" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.550262 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.550677 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:43.050658521 +0000 UTC m=+109.014841395 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.620249 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.652011 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.652951 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:43.152928261 +0000 UTC m=+109.117111135 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.688577 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qcqfz"] Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.715480 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfc9c"] Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.753864 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.753954 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2b5603a9-6e04-475d-99c3-9f9ff91ba6b5-kubelet-dir\") pod \"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5\" (UID: \"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5\") " Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.753986 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2b5603a9-6e04-475d-99c3-9f9ff91ba6b5-kube-api-access\") pod \"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5\" (UID: \"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5\") " Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.754075 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:43.254053756 +0000 UTC m=+109.218236630 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.754128 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2b5603a9-6e04-475d-99c3-9f9ff91ba6b5-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "2b5603a9-6e04-475d-99c3-9f9ff91ba6b5" (UID: "2b5603a9-6e04-475d-99c3-9f9ff91ba6b5"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.754158 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.754355 4767 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/2b5603a9-6e04-475d-99c3-9f9ff91ba6b5-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.754619 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-28 18:31:43.254610084 +0000 UTC m=+109.218792958 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-77vtc" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.762985 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b5603a9-6e04-475d-99c3-9f9ff91ba6b5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "2b5603a9-6e04-475d-99c3-9f9ff91ba6b5" (UID: "2b5603a9-6e04-475d-99c3-9f9ff91ba6b5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.848261 4767 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.855964 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.856334 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2b5603a9-6e04-475d-99c3-9f9ff91ba6b5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.856418 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-28 18:31:43.356399161 +0000 UTC m=+109.320582035 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.881370 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ns2sx"] Jan 28 18:31:42 crc kubenswrapper[4767]: E0128 18:31:42.881594 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b5603a9-6e04-475d-99c3-9f9ff91ba6b5" containerName="pruner" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.881608 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b5603a9-6e04-475d-99c3-9f9ff91ba6b5" containerName="pruner" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.881699 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b5603a9-6e04-475d-99c3-9f9ff91ba6b5" containerName="pruner" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.882485 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ns2sx" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.884244 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.891769 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ns2sx"] Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.929980 4767 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-28T18:31:42.84829332Z","Handler":null,"Name":""} Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.935003 4767 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.935039 4767 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.959935 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.969286 4767 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... 
Jan 28 18:31:42 crc kubenswrapper[4767]: I0128 18:31:42.969338 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.041125 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-77vtc\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") " pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.062215 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.062693 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d04606e-e735-4d65-b208-b39f04aa1630-catalog-content\") pod \"redhat-operators-ns2sx\" (UID: \"2d04606e-e735-4d65-b208-b39f04aa1630\") " pod="openshift-marketplace/redhat-operators-ns2sx"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.064654 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d04606e-e735-4d65-b208-b39f04aa1630-utilities\") pod \"redhat-operators-ns2sx\" (UID: \"2d04606e-e735-4d65-b208-b39f04aa1630\") " pod="openshift-marketplace/redhat-operators-ns2sx"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.064872 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qzxlx\" (UniqueName: \"kubernetes.io/projected/2d04606e-e735-4d65-b208-b39f04aa1630-kube-api-access-qzxlx\") pod \"redhat-operators-ns2sx\" (UID: \"2d04606e-e735-4d65-b208-b39f04aa1630\") " pod="openshift-marketplace/redhat-operators-ns2sx"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.071310 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.127299 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.156400 4767 generic.go:334] "Generic (PLEG): container finished" podID="848cd268-6278-49a5-9ac9-3e18122e6d5c" containerID="108ec89d5b17d73c369151e46439767e23d862cf52bebcc6bbfc373ea3bcd66f" exitCode=0
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.156888 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"848cd268-6278-49a5-9ac9-3e18122e6d5c","Type":"ContainerDied","Data":"108ec89d5b17d73c369151e46439767e23d862cf52bebcc6bbfc373ea3bcd66f"}
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.158724 4767 generic.go:334] "Generic (PLEG): container finished" podID="2e90df07-d5ed-42b7-a3d8-de62235b551d" containerID="3c9f535c2187e6cc243264add42717998fbec5665ad90450db5da2b6ea5ae1f9" exitCode=0
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.158798 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfc9c" event={"ID":"2e90df07-d5ed-42b7-a3d8-de62235b551d","Type":"ContainerDied","Data":"3c9f535c2187e6cc243264add42717998fbec5665ad90450db5da2b6ea5ae1f9"}
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.158821 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfc9c" event={"ID":"2e90df07-d5ed-42b7-a3d8-de62235b551d","Type":"ContainerStarted","Data":"7b4dd733fa0491b0ac8736274b97fa1ed23988192d7445fe981febcfa7e8169f"}
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.161535 4767 generic.go:334] "Generic (PLEG): container finished" podID="eff110e4-7a33-4782-86e1-efff7c646e6f" containerID="dc68665c12e4570b764a6f86ba83232a4c75b921eafc2d07525903d485eed430" exitCode=0
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.161594 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qcqfz" event={"ID":"eff110e4-7a33-4782-86e1-efff7c646e6f","Type":"ContainerDied","Data":"dc68665c12e4570b764a6f86ba83232a4c75b921eafc2d07525903d485eed430"}
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.161628 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qcqfz" event={"ID":"eff110e4-7a33-4782-86e1-efff7c646e6f","Type":"ContainerStarted","Data":"cbc0b597a869547186149a1268e53b1ab0d5d5099d47e1a8573bd60a43388fa6"}
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.164079 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"2b5603a9-6e04-475d-99c3-9f9ff91ba6b5","Type":"ContainerDied","Data":"48e511b50e66a9b6375a71aeb6794e17c59eda4ef1aa027fa14c6c6d51f2f164"}
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.164109 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="48e511b50e66a9b6375a71aeb6794e17c59eda4ef1aa027fa14c6c6d51f2f164"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.164160 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.165570 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d04606e-e735-4d65-b208-b39f04aa1630-catalog-content\") pod \"redhat-operators-ns2sx\" (UID: \"2d04606e-e735-4d65-b208-b39f04aa1630\") " pod="openshift-marketplace/redhat-operators-ns2sx"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.165629 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d04606e-e735-4d65-b208-b39f04aa1630-utilities\") pod \"redhat-operators-ns2sx\" (UID: \"2d04606e-e735-4d65-b208-b39f04aa1630\") " pod="openshift-marketplace/redhat-operators-ns2sx"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.165675 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qzxlx\" (UniqueName: \"kubernetes.io/projected/2d04606e-e735-4d65-b208-b39f04aa1630-kube-api-access-qzxlx\") pod \"redhat-operators-ns2sx\" (UID: \"2d04606e-e735-4d65-b208-b39f04aa1630\") " pod="openshift-marketplace/redhat-operators-ns2sx"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.166429 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d04606e-e735-4d65-b208-b39f04aa1630-utilities\") pod \"redhat-operators-ns2sx\" (UID: \"2d04606e-e735-4d65-b208-b39f04aa1630\") " pod="openshift-marketplace/redhat-operators-ns2sx"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.167165 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d04606e-e735-4d65-b208-b39f04aa1630-catalog-content\") pod \"redhat-operators-ns2sx\" (UID: \"2d04606e-e735-4d65-b208-b39f04aa1630\") " pod="openshift-marketplace/redhat-operators-ns2sx"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.189101 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" event={"ID":"ecf41ec8-6c32-4b5c-a157-f52cc4de995a","Type":"ContainerStarted","Data":"66faa308a865a83aef1cf83d6adf90cd30efba3e7d0c20d9cfd65aa1fac9f336"}
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.189154 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" event={"ID":"ecf41ec8-6c32-4b5c-a157-f52cc4de995a","Type":"ContainerStarted","Data":"18cfa0745427c79ef8b49c908c8e9d6a53dc41e642200990d1210bf93c85edea"}
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.191913 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qzxlx\" (UniqueName: \"kubernetes.io/projected/2d04606e-e735-4d65-b208-b39f04aa1630-kube-api-access-qzxlx\") pod \"redhat-operators-ns2sx\" (UID: \"2d04606e-e735-4d65-b208-b39f04aa1630\") " pod="openshift-marketplace/redhat-operators-ns2sx"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.209000 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ns2sx"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.290337 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-k5t94"]
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.307760 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-k5t94"]
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.309421 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k5t94"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.325031 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 18:31:43 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld
Jan 28 18:31:43 crc kubenswrapper[4767]: [+]process-running ok
Jan 28 18:31:43 crc kubenswrapper[4767]: healthz check failed
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.325398 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.385690 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-77vtc"]
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.469820 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ns2sx"]
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.469876 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-82vpb\" (UniqueName: \"kubernetes.io/projected/3482059d-fb54-44c1-8fd4-b4ca29e633e4-kube-api-access-82vpb\") pod \"redhat-operators-k5t94\" (UID: \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\") " pod="openshift-marketplace/redhat-operators-k5t94"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.469949 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3482059d-fb54-44c1-8fd4-b4ca29e633e4-utilities\") pod \"redhat-operators-k5t94\" (UID: \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\") " pod="openshift-marketplace/redhat-operators-k5t94"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.470034 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3482059d-fb54-44c1-8fd4-b4ca29e633e4-catalog-content\") pod \"redhat-operators-k5t94\" (UID: \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\") " pod="openshift-marketplace/redhat-operators-k5t94"
Jan 28 18:31:43 crc kubenswrapper[4767]: W0128 18:31:43.480482 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d04606e_e735_4d65_b208_b39f04aa1630.slice/crio-021c7926debadbf12ba87aa1ac151db9b609884bfbde068ee59abf0d034e71bb WatchSource:0}: Error finding container 021c7926debadbf12ba87aa1ac151db9b609884bfbde068ee59abf0d034e71bb: Status 404 returned error can't find the container with id 021c7926debadbf12ba87aa1ac151db9b609884bfbde068ee59abf0d034e71bb
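The router startup-probe failures that repeat through this stretch all carry the same healthz-style body: one line per sub-check, [+] for passing, [-] for failing, then a summary line, with the kubelet logging the whole thing because the status code was 500. A minimal sketch of an aggregated handler producing that shape of output; the has-synced flag and port are illustrative assumptions, not the OpenShift router's implementation:

    package main

    import (
        "fmt"
        "net/http"
        "sync/atomic"
    )

    var synced atomic.Bool // would be flipped once backend state is fully loaded

    // healthz writes one line per sub-check and returns 500 until every check
    // passes, mirroring the "[-]has-synced failed: reason withheld" bodies above.
    func healthz(w http.ResponseWriter, r *http.Request) {
        if !synced.Load() {
            w.WriteHeader(http.StatusInternalServerError)
            fmt.Fprintln(w, "[-]has-synced failed: reason withheld")
            fmt.Fprintln(w, "[+]process-running ok")
            fmt.Fprintln(w, "healthz check failed")
            return
        }
        fmt.Fprintln(w, "[+]has-synced ok")
        fmt.Fprintln(w, "[+]process-running ok")
    }

    func main() {
        http.HandleFunc("/healthz", healthz)
        http.ListenAndServe(":1936", nil)
    }

A kubelet startup probe against such an endpoint keeps logging statuscode 500 failures, exactly as recorded here, until the flag flips; in this log the router finally reports started/ready at 18:31:52.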
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.571854 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3482059d-fb54-44c1-8fd4-b4ca29e633e4-catalog-content\") pod \"redhat-operators-k5t94\" (UID: \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\") " pod="openshift-marketplace/redhat-operators-k5t94"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.572288 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-82vpb\" (UniqueName: \"kubernetes.io/projected/3482059d-fb54-44c1-8fd4-b4ca29e633e4-kube-api-access-82vpb\") pod \"redhat-operators-k5t94\" (UID: \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\") " pod="openshift-marketplace/redhat-operators-k5t94"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.572331 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3482059d-fb54-44c1-8fd4-b4ca29e633e4-utilities\") pod \"redhat-operators-k5t94\" (UID: \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\") " pod="openshift-marketplace/redhat-operators-k5t94"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.572414 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3482059d-fb54-44c1-8fd4-b4ca29e633e4-catalog-content\") pod \"redhat-operators-k5t94\" (UID: \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\") " pod="openshift-marketplace/redhat-operators-k5t94"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.572675 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3482059d-fb54-44c1-8fd4-b4ca29e633e4-utilities\") pod \"redhat-operators-k5t94\" (UID: \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\") " pod="openshift-marketplace/redhat-operators-k5t94"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.594933 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-82vpb\" (UniqueName: \"kubernetes.io/projected/3482059d-fb54-44c1-8fd4-b4ca29e633e4-kube-api-access-82vpb\") pod \"redhat-operators-k5t94\" (UID: \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\") " pod="openshift-marketplace/redhat-operators-k5t94"
Jan 28 18:31:43 crc kubenswrapper[4767]: I0128 18:31:43.644523 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k5t94"
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.046749 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-k5t94"]
Jan 28 18:31:44 crc kubenswrapper[4767]: W0128 18:31:44.158886 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3482059d_fb54_44c1_8fd4_b4ca29e633e4.slice/crio-e8f124c04b104347914a03864ccdf425d41a0886ea3299362b441f996bfd3263 WatchSource:0}: Error finding container e8f124c04b104347914a03864ccdf425d41a0886ea3299362b441f996bfd3263: Status 404 returned error can't find the container with id e8f124c04b104347914a03864ccdf425d41a0886ea3299362b441f996bfd3263
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.202736 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k5t94" event={"ID":"3482059d-fb54-44c1-8fd4-b4ca29e633e4","Type":"ContainerStarted","Data":"e8f124c04b104347914a03864ccdf425d41a0886ea3299362b441f996bfd3263"}
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.219911 4767 generic.go:334] "Generic (PLEG): container finished" podID="2d04606e-e735-4d65-b208-b39f04aa1630" containerID="13dab8432247edf469f61177543224f8e3cf4f9e07e80586ffa37bb0a9cfaac3" exitCode=0
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.220030 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ns2sx" event={"ID":"2d04606e-e735-4d65-b208-b39f04aa1630","Type":"ContainerDied","Data":"13dab8432247edf469f61177543224f8e3cf4f9e07e80586ffa37bb0a9cfaac3"}
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.220058 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ns2sx" event={"ID":"2d04606e-e735-4d65-b208-b39f04aa1630","Type":"ContainerStarted","Data":"021c7926debadbf12ba87aa1ac151db9b609884bfbde068ee59abf0d034e71bb"}
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.234915 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" event={"ID":"b803ae3e-471d-4c86-a375-c4e7ab4403cd","Type":"ContainerStarted","Data":"b6503fecf8c37d9a30928ecd8028313e8c74ce6442f8f24626e9ffdc0194dda5"}
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.234966 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" event={"ID":"b803ae3e-471d-4c86-a375-c4e7ab4403cd","Type":"ContainerStarted","Data":"83ffea9b9058f4523f754e6113f9aa2f6f7a30a6fd4e5d6a1c4605b6e1b36aa6"}
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.235012 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.245708 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" event={"ID":"ecf41ec8-6c32-4b5c-a157-f52cc4de995a","Type":"ContainerStarted","Data":"757f528b6b2921e7e77195448477ad98ebd7175f0798939bc90b1be720490a00"}
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.325249 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 18:31:44 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld
Jan 28 18:31:44 crc kubenswrapper[4767]: [+]process-running ok
Jan 28 18:31:44 crc kubenswrapper[4767]: healthz check failed
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.325305 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.325892 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" podStartSLOduration=86.325873165 podStartE2EDuration="1m26.325873165s" podCreationTimestamp="2026-01-28 18:30:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:44.324261173 +0000 UTC m=+110.288444067" watchObservedRunningTime="2026-01-28 18:31:44.325873165 +0000 UTC m=+110.290056039"
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.366068 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-5h8dc" podStartSLOduration=18.366024538 podStartE2EDuration="18.366024538s" podCreationTimestamp="2026-01-28 18:31:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:31:44.355699868 +0000 UTC m=+110.319882742" watchObservedRunningTime="2026-01-28 18:31:44.366024538 +0000 UTC m=+110.330207412"
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.627101 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.805060 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/848cd268-6278-49a5-9ac9-3e18122e6d5c-kube-api-access\") pod \"848cd268-6278-49a5-9ac9-3e18122e6d5c\" (UID: \"848cd268-6278-49a5-9ac9-3e18122e6d5c\") "
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.805715 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/848cd268-6278-49a5-9ac9-3e18122e6d5c-kubelet-dir\") pod \"848cd268-6278-49a5-9ac9-3e18122e6d5c\" (UID: \"848cd268-6278-49a5-9ac9-3e18122e6d5c\") "
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.806086 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/848cd268-6278-49a5-9ac9-3e18122e6d5c-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "848cd268-6278-49a5-9ac9-3e18122e6d5c" (UID: "848cd268-6278-49a5-9ac9-3e18122e6d5c"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.814910 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/848cd268-6278-49a5-9ac9-3e18122e6d5c-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "848cd268-6278-49a5-9ac9-3e18122e6d5c" (UID: "848cd268-6278-49a5-9ac9-3e18122e6d5c"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.815153 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes"
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.907478 4767 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/848cd268-6278-49a5-9ac9-3e18122e6d5c-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 28 18:31:44 crc kubenswrapper[4767]: I0128 18:31:44.907502 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/848cd268-6278-49a5-9ac9-3e18122e6d5c-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 28 18:31:45 crc kubenswrapper[4767]: I0128 18:31:45.279330 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 28 18:31:45 crc kubenswrapper[4767]: I0128 18:31:45.279287 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"848cd268-6278-49a5-9ac9-3e18122e6d5c","Type":"ContainerDied","Data":"6b02a564a36797dcde76b63a1b574a6c18c827037295439d6b977ac364112de2"}
Jan 28 18:31:45 crc kubenswrapper[4767]: I0128 18:31:45.279426 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b02a564a36797dcde76b63a1b574a6c18c827037295439d6b977ac364112de2"
Jan 28 18:31:45 crc kubenswrapper[4767]: I0128 18:31:45.285530 4767 generic.go:334] "Generic (PLEG): container finished" podID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" containerID="24b8d1ffb4d3253d79b444fd87a36f35c3a14f0287a0a7d097772358984e63df" exitCode=0
Jan 28 18:31:45 crc kubenswrapper[4767]: I0128 18:31:45.285806 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k5t94" event={"ID":"3482059d-fb54-44c1-8fd4-b4ca29e633e4","Type":"ContainerDied","Data":"24b8d1ffb4d3253d79b444fd87a36f35c3a14f0287a0a7d097772358984e63df"}
Jan 28 18:31:45 crc kubenswrapper[4767]: I0128 18:31:45.306195 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-4tbt4"
Jan 28 18:31:45 crc kubenswrapper[4767]: I0128 18:31:45.336279 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 18:31:45 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld
Jan 28 18:31:45 crc kubenswrapper[4767]: [+]process-running ok
Jan 28 18:31:45 crc kubenswrapper[4767]: healthz check failed
Jan 28 18:31:45 crc kubenswrapper[4767]: I0128 18:31:45.336360 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:31:46 crc kubenswrapper[4767]: I0128 18:31:46.324859 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 18:31:46 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld
Jan 28 18:31:46 crc kubenswrapper[4767]: [+]process-running ok
Jan 28 18:31:46 crc kubenswrapper[4767]: healthz check failed
Jan 28 18:31:46 crc kubenswrapper[4767]: I0128 18:31:46.325278 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:31:47 crc kubenswrapper[4767]: I0128 18:31:47.322180 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 18:31:47 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld
Jan 28 18:31:47 crc kubenswrapper[4767]: [+]process-running ok
Jan 28 18:31:47 crc kubenswrapper[4767]: healthz check failed
Jan 28 18:31:47 crc kubenswrapper[4767]: I0128 18:31:47.322246 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:31:47 crc kubenswrapper[4767]: I0128 18:31:47.528981 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg"
Jan 28 18:31:47 crc kubenswrapper[4767]: I0128 18:31:47.534007 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-bmcrg"
Jan 28 18:31:48 crc kubenswrapper[4767]: I0128 18:31:48.323838 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 18:31:48 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld
Jan 28 18:31:48 crc kubenswrapper[4767]: [+]process-running ok
Jan 28 18:31:48 crc kubenswrapper[4767]: healthz check failed
Jan 28 18:31:48 crc kubenswrapper[4767]: I0128 18:31:48.323902 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:31:49 crc kubenswrapper[4767]: I0128 18:31:49.211235 4767 patch_prober.go:28] interesting pod/console-f9d7485db-qfdzz container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused" start-of-body=
Jan 28 18:31:49 crc kubenswrapper[4767]: I0128 18:31:49.211328 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-qfdzz" podUID="1ac82155-1d09-4371-869a-e7edb9c4d5bc" containerName="console" probeResult="failure" output="Get \"https://10.217.0.23:8443/health\": dial tcp 10.217.0.23:8443: connect: connection refused"
Jan 28 18:31:49 crc kubenswrapper[4767]: I0128 18:31:49.321308 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 18:31:49 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld
Jan 28 18:31:49 crc kubenswrapper[4767]: [+]process-running ok
Jan 28 18:31:49 crc kubenswrapper[4767]: healthz check failed
Jan 28 18:31:49 crc kubenswrapper[4767]: I0128 18:31:49.321975 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:31:49 crc kubenswrapper[4767]: I0128 18:31:49.611145 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 28 18:31:49 crc kubenswrapper[4767]: I0128 18:31:49.611223 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 28 18:31:49 crc kubenswrapper[4767]: I0128 18:31:49.612187 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 28 18:31:49 crc kubenswrapper[4767]: I0128 18:31:49.612319 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 28 18:31:50 crc kubenswrapper[4767]: I0128 18:31:50.321372 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 18:31:50 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld
Jan 28 18:31:50 crc kubenswrapper[4767]: [+]process-running ok
Jan 28 18:31:50 crc kubenswrapper[4767]: healthz check failed
Jan 28 18:31:50 crc kubenswrapper[4767]: I0128 18:31:50.321424 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:31:51 crc kubenswrapper[4767]: I0128 18:31:51.321105 4767 patch_prober.go:28] interesting pod/router-default-5444994796-ttsxj container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 28 18:31:51 crc kubenswrapper[4767]: [-]has-synced failed: reason withheld
Jan 28 18:31:51 crc kubenswrapper[4767]: [+]process-running ok
Jan 28 18:31:51 crc kubenswrapper[4767]: healthz check failed
Jan 28 18:31:51 crc kubenswrapper[4767]: I0128 18:31:51.321162 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-ttsxj" podUID="2c588228-398e-4f20-bf14-03450184cc20" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:31:52 crc kubenswrapper[4767]: I0128 18:31:52.322712 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-ttsxj"
Jan 28 18:31:52 crc kubenswrapper[4767]: I0128 18:31:52.326386 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-ttsxj"
Jan 28 18:31:57 crc kubenswrapper[4767]: I0128 18:31:57.894432 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-24jzn"]
Jan 28 18:31:57 crc kubenswrapper[4767]: I0128 18:31:57.895145 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerName="controller-manager" containerID="cri-o://6bec9b5e01fffc7b3d852e96e63051e7cf48c8850f0d5ed113bd3f53fa4649a7" gracePeriod=30
Jan 28 18:31:57 crc kubenswrapper[4767]: I0128 18:31:57.932283 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs"]
Jan 28 18:31:57 crc kubenswrapper[4767]: I0128 18:31:57.932957 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" podUID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" containerName="route-controller-manager" containerID="cri-o://21059a77fb73980c706f8a6cc8ddd4a644d2d6c8772834feb1043e3d76c01f65" gracePeriod=30
Jan 28 18:31:58 crc kubenswrapper[4767]: I0128 18:31:58.466600 4767 generic.go:334] "Generic (PLEG): container finished" podID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerID="6bec9b5e01fffc7b3d852e96e63051e7cf48c8850f0d5ed113bd3f53fa4649a7" exitCode=0
Jan 28 18:31:58 crc kubenswrapper[4767]: I0128 18:31:58.466677 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" event={"ID":"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a","Type":"ContainerDied","Data":"6bec9b5e01fffc7b3d852e96e63051e7cf48c8850f0d5ed113bd3f53fa4649a7"}
Jan 28 18:31:58 crc kubenswrapper[4767]: I0128 18:31:58.468705 4767 generic.go:334] "Generic (PLEG): container finished" podID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" containerID="21059a77fb73980c706f8a6cc8ddd4a644d2d6c8772834feb1043e3d76c01f65" exitCode=0
Jan 28 18:31:58 crc kubenswrapper[4767]: I0128 18:31:58.468737 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" event={"ID":"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0","Type":"ContainerDied","Data":"21059a77fb73980c706f8a6cc8ddd4a644d2d6c8772834feb1043e3d76c01f65"}
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.066295 4767 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-24jzn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.066429 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused"
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.152945 4767 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-vx7rs container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.153026 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" podUID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused"
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.213942 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-qfdzz"
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.222103 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-qfdzz"
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.611414 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.611481 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.611538 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-4svb7"
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.611812 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.611932 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.612220 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"2f2a16009a88807ccf22b309c351fdb4ecbdbc775284d08ffb87ac5e96577025"} pod="openshift-console/downloads-7954f5f757-4svb7" containerMessage="Container download-server failed liveness probe, will be restarted"
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.612313 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" containerID="cri-o://2f2a16009a88807ccf22b309c351fdb4ecbdbc775284d08ffb87ac5e96577025" gracePeriod=2
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.614417 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 28 18:31:59 crc kubenswrapper[4767]: I0128 18:31:59.614453 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 28 18:32:00 crc kubenswrapper[4767]: I0128 18:32:00.480886 4767 generic.go:334] "Generic (PLEG): container finished" podID="bfa657b3-28e7-4503-a332-a2cd83725356" containerID="2f2a16009a88807ccf22b309c351fdb4ecbdbc775284d08ffb87ac5e96577025" exitCode=0
Jan 28 18:32:00 crc kubenswrapper[4767]: I0128 18:32:00.480959 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-4svb7" event={"ID":"bfa657b3-28e7-4503-a332-a2cd83725356","Type":"ContainerDied","Data":"2f2a16009a88807ccf22b309c351fdb4ecbdbc775284d08ffb87ac5e96577025"}
Jan 28 18:32:03 crc kubenswrapper[4767]: I0128 18:32:03.131918 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:32:09 crc kubenswrapper[4767]: I0128 18:32:09.065729 4767 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-24jzn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Jan 28 18:32:09 crc kubenswrapper[4767]: I0128 18:32:09.066220 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused"
Jan 28 18:32:09 crc kubenswrapper[4767]: I0128 18:32:09.149968 4767 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-vx7rs container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused" start-of-body=
Jan 28 18:32:09 crc kubenswrapper[4767]: I0128 18:32:09.150028 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" podUID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: connect: connection refused"
Jan 28 18:32:09 crc kubenswrapper[4767]: I0128 18:32:09.613134 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 28 18:32:09 crc kubenswrapper[4767]: I0128 18:32:09.613230 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 28 18:32:10 crc kubenswrapper[4767]: I0128 18:32:10.061716 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-9n4rf"
Jan 28 18:32:18 crc kubenswrapper[4767]: I0128 18:32:18.302854 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 28 18:32:18 crc kubenswrapper[4767]: E0128 18:32:18.303887 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="848cd268-6278-49a5-9ac9-3e18122e6d5c" containerName="pruner"
Jan 28 18:32:18 crc kubenswrapper[4767]: I0128 18:32:18.303900 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="848cd268-6278-49a5-9ac9-3e18122e6d5c" containerName="pruner"
Jan 28 18:32:18 crc kubenswrapper[4767]: I0128 18:32:18.304984 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="848cd268-6278-49a5-9ac9-3e18122e6d5c" containerName="pruner"
Jan 28 18:32:18 crc kubenswrapper[4767]: I0128 18:32:18.307809 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 18:32:18 crc kubenswrapper[4767]: I0128 18:32:18.314177 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Jan 28 18:32:18 crc kubenswrapper[4767]: I0128 18:32:18.314572 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Jan 28 18:32:18 crc kubenswrapper[4767]: I0128 18:32:18.316687 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"]
Jan 28 18:32:18 crc kubenswrapper[4767]: I0128 18:32:18.416223 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e5367aa4-a0a8-481d-8205-4a73c4d4b6ba-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 18:32:18 crc kubenswrapper[4767]: I0128 18:32:18.416365 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e5367aa4-a0a8-481d-8205-4a73c4d4b6ba-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 18:32:18 crc kubenswrapper[4767]: I0128 18:32:18.517341 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e5367aa4-a0a8-481d-8205-4a73c4d4b6ba-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 18:32:18 crc kubenswrapper[4767]: I0128 18:32:18.517473 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e5367aa4-a0a8-481d-8205-4a73c4d4b6ba-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 18:32:18 crc kubenswrapper[4767]: I0128 18:32:18.517620 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e5367aa4-a0a8-481d-8205-4a73c4d4b6ba-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 18:32:19 crc kubenswrapper[4767]: I0128 18:32:19.094194 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e5367aa4-a0a8-481d-8205-4a73c4d4b6ba-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba\") " pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 18:32:19 crc kubenswrapper[4767]: I0128 18:32:19.231707 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 18:32:19 crc kubenswrapper[4767]: I0128 18:32:19.610599 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 28 18:32:19 crc kubenswrapper[4767]: I0128 18:32:19.610666 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 28 18:32:20 crc kubenswrapper[4767]: I0128 18:32:20.066369 4767 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-24jzn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:32:20 crc kubenswrapper[4767]: I0128 18:32:20.066431 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:32:20 crc kubenswrapper[4767]: I0128 18:32:20.149728 4767 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-vx7rs container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:32:20 crc kubenswrapper[4767]: I0128 18:32:20.149866 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" podUID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:32:20 crc kubenswrapper[4767]: E0128 18:32:20.895072 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Jan 28 18:32:20 crc kubenswrapper[4767]: E0128 18:32:20.895315 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h5g7k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-qcqfz_openshift-marketplace(eff110e4-7a33-4782-86e1-efff7c646e6f): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 28 18:32:20 crc kubenswrapper[4767]: E0128 18:32:20.896570 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-qcqfz" podUID="eff110e4-7a33-4782-86e1-efff7c646e6f"
Jan 28 18:32:20 crc kubenswrapper[4767]: E0128 18:32:20.982373 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Jan 28 18:32:20 crc kubenswrapper[4767]: E0128 18:32:20.982532 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6nttm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-bfc9c_openshift-marketplace(2e90df07-d5ed-42b7-a3d8-de62235b551d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 28 18:32:20 crc kubenswrapper[4767]: E0128 18:32:20.983707 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-bfc9c" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d"
Jan 28 18:32:23 crc kubenswrapper[4767]: E0128 18:32:23.469799 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Jan 28 18:32:23 crc kubenswrapper[4767]: E0128 18:32:23.470318 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tx7zn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-gft5f_openshift-marketplace(daa183e4-f49d-4f7d-9f9b-66e42f869297): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 28 18:32:23 crc kubenswrapper[4767]: E0128 18:32:23.471482 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-gft5f" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297"
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.101318 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.103504 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.116558 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"]
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.202757 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-kube-api-access\") pod \"installer-9-crc\" (UID: \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.202830 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-kubelet-dir\") pod \"installer-9-crc\" (UID: \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.202847 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-var-lock\") pod \"installer-9-crc\" (UID: \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.303740 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-kubelet-dir\") pod \"installer-9-crc\" (UID: \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.303779 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-var-lock\") pod \"installer-9-crc\" (UID: \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.303875 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-kube-api-access\") pod \"installer-9-crc\" (UID: \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.303913 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-kubelet-dir\") pod \"installer-9-crc\" (UID: \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.303998 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-var-lock\") pod \"installer-9-crc\" (UID: \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.324533 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-kube-api-access\") pod \"installer-9-crc\" (UID: \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\") " pod="openshift-kube-apiserver/installer-9-crc"
Jan 28 18:32:24 crc kubenswrapper[4767]: I0128 18:32:24.427819 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.750815 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.752686 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.754078 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.754336 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.764867 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.768554 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.854604 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.854698 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.857710 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.867356 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.880199 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.880320 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.948563 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.966600 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 28 18:32:26 crc kubenswrapper[4767]: I0128 18:32:26.976363 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 28 18:32:29 crc kubenswrapper[4767]: I0128 18:32:29.610672 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 28 18:32:29 crc kubenswrapper[4767]: I0128 18:32:29.611339 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 28 18:32:30 crc kubenswrapper[4767]: I0128 18:32:30.066774 4767 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-24jzn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: i/o timeout" start-of-body=
Jan 28 18:32:30 crc kubenswrapper[4767]: I0128 18:32:30.066846 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: i/o timeout"
Jan 28 18:32:30 crc kubenswrapper[4767]: I0128 18:32:30.150331 4767 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-vx7rs container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: i/o timeout (Client.Timeout exceeded while awaiting headers)" start-of-body=
Jan 28 18:32:30 crc kubenswrapper[4767]: I0128 18:32:30.150433 4767 prober.go:107] "Probe failed" probeType="Readiness"
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" podUID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": dial tcp 10.217.0.10:8443: i/o timeout (Client.Timeout exceeded while awaiting headers)" Jan 28 18:32:32 crc kubenswrapper[4767]: E0128 18:32:32.874651 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 28 18:32:32 crc kubenswrapper[4767]: E0128 18:32:32.874923 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bjgbt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-86rk5_openshift-marketplace(c91bd017-c929-4891-9118-95e20ef61238): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 18:32:32 crc kubenswrapper[4767]: E0128 18:32:32.876294 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-86rk5" podUID="c91bd017-c929-4891-9118-95e20ef61238" Jan 28 18:32:38 crc kubenswrapper[4767]: E0128 18:32:38.566506 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-86rk5" podUID="c91bd017-c929-4891-9118-95e20ef61238" Jan 28 18:32:38 crc kubenswrapper[4767]: E0128 18:32:38.594701 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: 
copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 28 18:32:38 crc kubenswrapper[4767]: E0128 18:32:38.594931 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8v5hl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-6w58h_openshift-marketplace(b0ee47b3-df01-4b36-8d19-ead3db6a705d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 18:32:38 crc kubenswrapper[4767]: E0128 18:32:38.597485 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-6w58h" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" Jan 28 18:32:39 crc kubenswrapper[4767]: I0128 18:32:39.611150 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 28 18:32:39 crc kubenswrapper[4767]: I0128 18:32:39.611651 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 28 18:32:40 crc kubenswrapper[4767]: I0128 18:32:40.065800 4767 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-24jzn container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:32:40 
crc kubenswrapper[4767]: I0128 18:32:40.065951 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:32:40 crc kubenswrapper[4767]: I0128 18:32:40.150940 4767 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-vx7rs container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:32:40 crc kubenswrapper[4767]: I0128 18:32:40.151045 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" podUID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.10:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:32:45 crc kubenswrapper[4767]: I0128 18:32:45.456627 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:32:45 crc kubenswrapper[4767]: I0128 18:32:45.456771 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:32:46 crc kubenswrapper[4767]: E0128 18:32:46.845793 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-6w58h" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.003157 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.022585 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.051096 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb"] Jan 28 18:32:47 crc kubenswrapper[4767]: E0128 18:32:47.051424 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerName="controller-manager" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.051440 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerName="controller-manager" Jan 28 18:32:47 crc kubenswrapper[4767]: E0128 18:32:47.051456 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" containerName="route-controller-manager" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.051466 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" containerName="route-controller-manager" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.051585 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" containerName="route-controller-manager" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.051596 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" containerName="controller-manager" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.053449 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.073964 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb"] Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.202771 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-serving-cert\") pod \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.202924 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-client-ca\") pod \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.203045 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zb6k2\" (UniqueName: \"kubernetes.io/projected/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-kube-api-access-zb6k2\") pod \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.203086 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-serving-cert\") pod \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.203116 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-config\") pod \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.203156 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qpb5\" (UniqueName: \"kubernetes.io/projected/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-kube-api-access-4qpb5\") pod \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.203193 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-client-ca\") pod \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.203263 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-config\") pod \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\" (UID: \"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0\") " Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.203309 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-proxy-ca-bundles\") pod \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\" (UID: \"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a\") " Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.203574 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4264m\" (UniqueName: \"kubernetes.io/projected/49ab6080-e934-47a4-aea4-a437401a6b1d-kube-api-access-4264m\") pod \"route-controller-manager-7956cc4f9c-rt2zb\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") " pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.203680 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49ab6080-e934-47a4-aea4-a437401a6b1d-config\") pod \"route-controller-manager-7956cc4f9c-rt2zb\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") " pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.203730 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/49ab6080-e934-47a4-aea4-a437401a6b1d-client-ca\") pod \"route-controller-manager-7956cc4f9c-rt2zb\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") " pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.203784 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/49ab6080-e934-47a4-aea4-a437401a6b1d-serving-cert\") pod \"route-controller-manager-7956cc4f9c-rt2zb\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") " pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.204709 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" (UID: "cfddbedb-c663-4cb3-ae11-1e0dc851cb2a"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.204803 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-client-ca" (OuterVolumeSpecName: "client-ca") pod "cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" (UID: "cfddbedb-c663-4cb3-ae11-1e0dc851cb2a"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.205010 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-config" (OuterVolumeSpecName: "config") pod "cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" (UID: "cfddbedb-c663-4cb3-ae11-1e0dc851cb2a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.205320 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-client-ca" (OuterVolumeSpecName: "client-ca") pod "b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" (UID: "b94d9646-3e32-4ab1-bf06-0fc5f349c2c0"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.205764 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-config" (OuterVolumeSpecName: "config") pod "b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" (UID: "b94d9646-3e32-4ab1-bf06-0fc5f349c2c0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.212003 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" (UID: "cfddbedb-c663-4cb3-ae11-1e0dc851cb2a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.212976 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-kube-api-access-4qpb5" (OuterVolumeSpecName: "kube-api-access-4qpb5") pod "cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" (UID: "cfddbedb-c663-4cb3-ae11-1e0dc851cb2a"). InnerVolumeSpecName "kube-api-access-4qpb5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.213043 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-kube-api-access-zb6k2" (OuterVolumeSpecName: "kube-api-access-zb6k2") pod "b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" (UID: "b94d9646-3e32-4ab1-bf06-0fc5f349c2c0"). InnerVolumeSpecName "kube-api-access-zb6k2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.214004 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" (UID: "b94d9646-3e32-4ab1-bf06-0fc5f349c2c0"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305379 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4264m\" (UniqueName: \"kubernetes.io/projected/49ab6080-e934-47a4-aea4-a437401a6b1d-kube-api-access-4264m\") pod \"route-controller-manager-7956cc4f9c-rt2zb\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") " pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305525 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49ab6080-e934-47a4-aea4-a437401a6b1d-config\") pod \"route-controller-manager-7956cc4f9c-rt2zb\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") " pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305568 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/49ab6080-e934-47a4-aea4-a437401a6b1d-client-ca\") pod \"route-controller-manager-7956cc4f9c-rt2zb\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") " pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305608 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/49ab6080-e934-47a4-aea4-a437401a6b1d-serving-cert\") pod \"route-controller-manager-7956cc4f9c-rt2zb\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") " pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305671 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zb6k2\" (UniqueName: \"kubernetes.io/projected/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-kube-api-access-zb6k2\") on node \"crc\" DevicePath \"\"" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305690 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305707 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305722 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qpb5\" (UniqueName: \"kubernetes.io/projected/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-kube-api-access-4qpb5\") on node \"crc\" DevicePath \"\"" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305734 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-client-ca\") on node \"crc\" 
DevicePath \"\"" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305750 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305761 4767 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305774 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.305786 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.308106 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/49ab6080-e934-47a4-aea4-a437401a6b1d-client-ca\") pod \"route-controller-manager-7956cc4f9c-rt2zb\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") " pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.308256 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49ab6080-e934-47a4-aea4-a437401a6b1d-config\") pod \"route-controller-manager-7956cc4f9c-rt2zb\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") " pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.310766 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/49ab6080-e934-47a4-aea4-a437401a6b1d-serving-cert\") pod \"route-controller-manager-7956cc4f9c-rt2zb\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") " pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.327066 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4264m\" (UniqueName: \"kubernetes.io/projected/49ab6080-e934-47a4-aea4-a437401a6b1d-kube-api-access-4264m\") pod \"route-controller-manager-7956cc4f9c-rt2zb\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") " pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.400725 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.402564 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 28 18:32:47 crc kubenswrapper[4767]: W0128 18:32:47.406798 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-poddc3fefa4_19b5_4725_9a82_ab64b0c735f5.slice/crio-48bf93c1af011fac9c618943a309c334ea8cf039b775ff85d20b96c515a89629 WatchSource:0}: Error finding container 48bf93c1af011fac9c618943a309c334ea8cf039b775ff85d20b96c515a89629: Status 404 returned error can't find the container with id 48bf93c1af011fac9c618943a309c334ea8cf039b775ff85d20b96c515a89629 Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.440931 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 28 18:32:47 crc kubenswrapper[4767]: W0128 18:32:47.466748 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-d3564d5cfc34022a76e07265b27b891babad735d05fe848bd373dceb9c904e9f WatchSource:0}: Error finding container d3564d5cfc34022a76e07265b27b891babad735d05fe848bd373dceb9c904e9f: Status 404 returned error can't find the container with id d3564d5cfc34022a76e07265b27b891babad735d05fe848bd373dceb9c904e9f Jan 28 18:32:47 crc kubenswrapper[4767]: W0128 18:32:47.471235 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-90d990b242a8aa8e298021e83097604d0302afc8ddd1623cf2e0580aeea00de3 WatchSource:0}: Error finding container 90d990b242a8aa8e298021e83097604d0302afc8ddd1623cf2e0580aeea00de3: Status 404 returned error can't find the container with id 90d990b242a8aa8e298021e83097604d0302afc8ddd1623cf2e0580aeea00de3 Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.663567 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb"] Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.739357 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"d3564d5cfc34022a76e07265b27b891babad735d05fe848bd373dceb9c904e9f"} Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.740518 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" event={"ID":"49ab6080-e934-47a4-aea4-a437401a6b1d","Type":"ContainerStarted","Data":"c10517a1ff580e9413a8fe0c2f732102fa8454a6c64ede0226fcc140bdb1ef7f"} Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.741314 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"dc3fefa4-19b5-4725-9a82-ab64b0c735f5","Type":"ContainerStarted","Data":"48bf93c1af011fac9c618943a309c334ea8cf039b775ff85d20b96c515a89629"} Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.742378 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" 
event={"ID":"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba","Type":"ContainerStarted","Data":"021d3b4e6a3b3aa4a214926cfc330f242ca8cafe0238e97261be27a8cd0852e8"} Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.743146 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f98a31117c82f16f9cefc2576404486368492cb5018d3272231697fac9b76023"} Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.744907 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" event={"ID":"cfddbedb-c663-4cb3-ae11-1e0dc851cb2a","Type":"ContainerDied","Data":"afaa4b90e785a316fe01ea76ac719e9320a6ce19e9a0dc50d4f8fc9b083d7f68"} Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.744998 4767 scope.go:117] "RemoveContainer" containerID="6bec9b5e01fffc7b3d852e96e63051e7cf48c8850f0d5ed113bd3f53fa4649a7" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.745287 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-24jzn" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.749496 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.749488 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs" event={"ID":"b94d9646-3e32-4ab1-bf06-0fc5f349c2c0","Type":"ContainerDied","Data":"75e07472e49ac1c56f0714f46d8080b4fc7f0ef9d1f4762e4f6a35dd00d2a729"} Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.750401 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"90d990b242a8aa8e298021e83097604d0302afc8ddd1623cf2e0580aeea00de3"} Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.776536 4767 scope.go:117] "RemoveContainer" containerID="21059a77fb73980c706f8a6cc8ddd4a644d2d6c8772834feb1043e3d76c01f65" Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.793394 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs"] Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.796575 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-vx7rs"] Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.820859 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-24jzn"] Jan 28 18:32:47 crc kubenswrapper[4767]: I0128 18:32:47.825132 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-24jzn"] Jan 28 18:32:48 crc kubenswrapper[4767]: I0128 18:32:48.809581 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b94d9646-3e32-4ab1-bf06-0fc5f349c2c0" path="/var/lib/kubelet/pods/b94d9646-3e32-4ab1-bf06-0fc5f349c2c0/volumes" Jan 28 18:32:48 crc kubenswrapper[4767]: I0128 18:32:48.812645 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfddbedb-c663-4cb3-ae11-1e0dc851cb2a" 
path="/var/lib/kubelet/pods/cfddbedb-c663-4cb3-ae11-1e0dc851cb2a/volumes" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.371944 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g"] Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.373912 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.379158 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.379805 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.380104 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.380385 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.380879 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.379328 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.385688 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g"] Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.389509 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.541015 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-config\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.541105 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de9c3762-ae24-4aca-91c2-b56f539545de-serving-cert\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.541896 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-proxy-ca-bundles\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.542085 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdsjp\" (UniqueName: \"kubernetes.io/projected/de9c3762-ae24-4aca-91c2-b56f539545de-kube-api-access-mdsjp\") pod 
\"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.542429 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-client-ca\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.610796 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.610912 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.646524 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-proxy-ca-bundles\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.646605 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdsjp\" (UniqueName: \"kubernetes.io/projected/de9c3762-ae24-4aca-91c2-b56f539545de-kube-api-access-mdsjp\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.646682 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-client-ca\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.646725 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-config\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.646767 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de9c3762-ae24-4aca-91c2-b56f539545de-serving-cert\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.648038 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" 
(UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-proxy-ca-bundles\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.648056 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-client-ca\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.648422 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-config\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.662996 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de9c3762-ae24-4aca-91c2-b56f539545de-serving-cert\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.667329 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdsjp\" (UniqueName: \"kubernetes.io/projected/de9c3762-ae24-4aca-91c2-b56f539545de-kube-api-access-mdsjp\") pod \"controller-manager-7d5fb7db4c-w4s8g\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") " pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.703047 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.777223 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" event={"ID":"49ab6080-e934-47a4-aea4-a437401a6b1d","Type":"ContainerStarted","Data":"734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8"} Jan 28 18:32:49 crc kubenswrapper[4767]: I0128 18:32:49.779238 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba","Type":"ContainerStarted","Data":"e295f5e4c7f8a2e3496992805a1c5df490432d019481b03e7ce8bc138d100383"} Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.164038 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g"] Jan 28 18:32:50 crc kubenswrapper[4767]: E0128 18:32:50.726848 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 28 18:32:50 crc kubenswrapper[4767]: E0128 18:32:50.728241 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qzxlx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-ns2sx_openshift-marketplace(2d04606e-e735-4d65-b208-b39f04aa1630): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 18:32:50 crc kubenswrapper[4767]: E0128 18:32:50.729547 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-ns2sx" 
podUID="2d04606e-e735-4d65-b208-b39f04aa1630" Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.787567 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f2f0ffd7c802b30cbabb92be711714ab968b4b9c7ec667efd222a49854427b35"} Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.787652 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.789135 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"f4f1f2a6d6d42e8548d75e945279bc45c25cfcbc76a8bb33faf382b4200b8478"} Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.790764 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" event={"ID":"de9c3762-ae24-4aca-91c2-b56f539545de","Type":"ContainerStarted","Data":"0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1"} Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.790871 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" event={"ID":"de9c3762-ae24-4aca-91c2-b56f539545de","Type":"ContainerStarted","Data":"68eb94a05e138b76731d5c38deb39e237dd55546bde4f109ee0a262fd4275e41"} Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.792272 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"1448c698b11570075ddc5aee1dbe797a026809c2a703d158d7f4607b9ecb05ba"} Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.793953 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"dc3fefa4-19b5-4725-9a82-ab64b0c735f5","Type":"ContainerStarted","Data":"b90f63be3d81542cb75fc09352d840c32adb09baa2eada9a62f02aa5370bcd8a"} Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.803482 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-4svb7" event={"ID":"bfa657b3-28e7-4503-a332-a2cd83725356","Type":"ContainerStarted","Data":"d3fff42b7b2ae07768b15ade78be9f74a4e3897d6b8b60048daa34d420a33629"} Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.805746 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.806024 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-4svb7" Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.806077 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.806228 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" 
containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 28 18:32:50 crc kubenswrapper[4767]: E0128 18:32:50.809700 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-ns2sx" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.810615 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.828124 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=26.82810758 podStartE2EDuration="26.82810758s" podCreationTimestamp="2026-01-28 18:32:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:32:50.826775119 +0000 UTC m=+176.790958003" watchObservedRunningTime="2026-01-28 18:32:50.82810758 +0000 UTC m=+176.792290454" Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.900344 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=32.900318548 podStartE2EDuration="32.900318548s" podCreationTimestamp="2026-01-28 18:32:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:32:50.886335301 +0000 UTC m=+176.850518175" watchObservedRunningTime="2026-01-28 18:32:50.900318548 +0000 UTC m=+176.864501412" Jan 28 18:32:50 crc kubenswrapper[4767]: I0128 18:32:50.913471 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" podStartSLOduration=33.913448 podStartE2EDuration="33.913448s" podCreationTimestamp="2026-01-28 18:32:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:32:50.912033825 +0000 UTC m=+176.876216709" watchObservedRunningTime="2026-01-28 18:32:50.913448 +0000 UTC m=+176.877630874" Jan 28 18:32:51 crc kubenswrapper[4767]: I0128 18:32:51.808671 4767 generic.go:334] "Generic (PLEG): container finished" podID="e5367aa4-a0a8-481d-8205-4a73c4d4b6ba" containerID="e295f5e4c7f8a2e3496992805a1c5df490432d019481b03e7ce8bc138d100383" exitCode=0 Jan 28 18:32:51 crc kubenswrapper[4767]: I0128 18:32:51.808812 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba","Type":"ContainerDied","Data":"e295f5e4c7f8a2e3496992805a1c5df490432d019481b03e7ce8bc138d100383"} Jan 28 18:32:51 crc kubenswrapper[4767]: I0128 18:32:51.809217 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 28 18:32:51 crc kubenswrapper[4767]: I0128 18:32:51.809258 4767 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 28 18:32:51 crc kubenswrapper[4767]: E0128 18:32:51.937796 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 28 18:32:51 crc kubenswrapper[4767]: E0128 18:32:51.938020 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nkq77,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-4lhmz_openshift-marketplace(8d36dd2c-a6e9-4369-aae5-c657695233a5): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 18:32:51 crc kubenswrapper[4767]: E0128 18:32:51.940298 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-4lhmz" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" Jan 28 18:32:51 crc kubenswrapper[4767]: E0128 18:32:51.972172 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 28 18:32:51 crc kubenswrapper[4767]: E0128 18:32:51.972415 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-82vpb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-k5t94_openshift-marketplace(3482059d-fb54-44c1-8fd4-b4ca29e633e4): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 18:32:51 crc kubenswrapper[4767]: E0128 18:32:51.973890 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-k5t94" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" Jan 28 18:32:52 crc kubenswrapper[4767]: I0128 18:32:52.815591 4767 patch_prober.go:28] interesting pod/downloads-7954f5f757-4svb7 container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 28 18:32:52 crc kubenswrapper[4767]: I0128 18:32:52.815828 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-4svb7" podUID="bfa657b3-28e7-4503-a332-a2cd83725356" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 28 18:32:53 crc kubenswrapper[4767]: I0128 18:32:53.818475 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:53 crc kubenswrapper[4767]: I0128 18:32:53.822084 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:32:53 crc kubenswrapper[4767]: I0128 18:32:53.835396 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" podStartSLOduration=36.835377914 podStartE2EDuration="36.835377914s" podCreationTimestamp="2026-01-28 18:32:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 
18:32:53.833091903 +0000 UTC m=+179.797274767" watchObservedRunningTime="2026-01-28 18:32:53.835377914 +0000 UTC m=+179.799560788"
Jan 28 18:32:54 crc kubenswrapper[4767]: E0128 18:32:54.214424 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-4lhmz" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5"
Jan 28 18:32:54 crc kubenswrapper[4767]: E0128 18:32:54.214864 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-k5t94" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4"
Jan 28 18:32:54 crc kubenswrapper[4767]: I0128 18:32:54.256938 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
Jan 28 18:32:54 crc kubenswrapper[4767]: I0128 18:32:54.357899 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e5367aa4-a0a8-481d-8205-4a73c4d4b6ba-kubelet-dir\") pod \"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba\" (UID: \"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba\") "
Jan 28 18:32:54 crc kubenswrapper[4767]: I0128 18:32:54.358042 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e5367aa4-a0a8-481d-8205-4a73c4d4b6ba-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e5367aa4-a0a8-481d-8205-4a73c4d4b6ba" (UID: "e5367aa4-a0a8-481d-8205-4a73c4d4b6ba"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 18:32:54 crc kubenswrapper[4767]: I0128 18:32:54.358082 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e5367aa4-a0a8-481d-8205-4a73c4d4b6ba-kube-api-access\") pod \"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba\" (UID: \"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba\") "
Jan 28 18:32:54 crc kubenswrapper[4767]: I0128 18:32:54.358502 4767 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e5367aa4-a0a8-481d-8205-4a73c4d4b6ba-kubelet-dir\") on node \"crc\" DevicePath \"\""
Jan 28 18:32:54 crc kubenswrapper[4767]: I0128 18:32:54.365300 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5367aa4-a0a8-481d-8205-4a73c4d4b6ba-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e5367aa4-a0a8-481d-8205-4a73c4d4b6ba" (UID: "e5367aa4-a0a8-481d-8205-4a73c4d4b6ba"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:32:54 crc kubenswrapper[4767]: I0128 18:32:54.460624 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e5367aa4-a0a8-481d-8205-4a73c4d4b6ba-kube-api-access\") on node \"crc\" DevicePath \"\""
Jan 28 18:32:54 crc kubenswrapper[4767]: I0128 18:32:54.824787 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc"
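The revision-pruner teardown above follows the kubelet volume manager's reconciliation pattern: once the pod leaves the desired state, each mounted volume is driven through operationExecutor.UnmountVolume, a plugin TearDown, and finally a "Volume detached" record. Below is a toy sketch of that desired-versus-actual reconcile loop, assuming a single in-memory map per state; the real manager runs asynchronous per-volume operations, and all names here are illustrative.

    // Toy reconcile: volumes mounted in the actual state but absent from the
    // desired state are unmounted and marked detached.
    package main

    import "fmt"

    type volume struct{ name, podUID string }

    func reconcile(desired, actual map[volume]bool) {
        for v := range actual {
            if desired[v] {
                continue // still wanted by some pod
            }
            fmt.Printf("operationExecutor.UnmountVolume started for volume %q pod %q\n", v.name, v.podUID)
            // Here the real operation executor invokes the plugin's TearDown
            // (host-path, projected, ...) before updating the actual state.
            delete(actual, v)
            fmt.Printf("Volume detached for volume %q on node \"crc\"\n", v.name)
        }
    }

    func main() {
        pod := "e5367aa4-a0a8-481d-8205-4a73c4d4b6ba"
        actual := map[volume]bool{
            {"kubelet-dir", pod}:     true,
            {"kube-api-access", pod}: true,
        }
        reconcile(map[volume]bool{}, actual) // pod deleted: nothing desired
    }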
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 28 18:32:54 crc kubenswrapper[4767]: I0128 18:32:54.824958 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"e5367aa4-a0a8-481d-8205-4a73c4d4b6ba","Type":"ContainerDied","Data":"021d3b4e6a3b3aa4a214926cfc330f242ca8cafe0238e97261be27a8cd0852e8"} Jan 28 18:32:54 crc kubenswrapper[4767]: I0128 18:32:54.825049 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="021d3b4e6a3b3aa4a214926cfc330f242ca8cafe0238e97261be27a8cd0852e8" Jan 28 18:32:59 crc kubenswrapper[4767]: I0128 18:32:59.616515 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-4svb7" Jan 28 18:33:06 crc kubenswrapper[4767]: I0128 18:33:06.889925 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86rk5" event={"ID":"c91bd017-c929-4891-9118-95e20ef61238","Type":"ContainerStarted","Data":"7294ef78274eee2299c6db1498f7cf25c7f812d76c6873c3a73617b7d7f55bd2"} Jan 28 18:33:06 crc kubenswrapper[4767]: I0128 18:33:06.893023 4767 generic.go:334] "Generic (PLEG): container finished" podID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" containerID="f265a8cacb89b37453bbac1f5b5043377e8b536fe961fbd1b293fe59a4cc4c3e" exitCode=0 Jan 28 18:33:06 crc kubenswrapper[4767]: I0128 18:33:06.893086 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6w58h" event={"ID":"b0ee47b3-df01-4b36-8d19-ead3db6a705d","Type":"ContainerDied","Data":"f265a8cacb89b37453bbac1f5b5043377e8b536fe961fbd1b293fe59a4cc4c3e"} Jan 28 18:33:06 crc kubenswrapper[4767]: I0128 18:33:06.894832 4767 generic.go:334] "Generic (PLEG): container finished" podID="daa183e4-f49d-4f7d-9f9b-66e42f869297" containerID="611188ca123c9d9653f7d7addf530286cc5e5a1ba26a1dd06f33a6ac4dc49bf8" exitCode=0 Jan 28 18:33:06 crc kubenswrapper[4767]: I0128 18:33:06.894876 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gft5f" event={"ID":"daa183e4-f49d-4f7d-9f9b-66e42f869297","Type":"ContainerDied","Data":"611188ca123c9d9653f7d7addf530286cc5e5a1ba26a1dd06f33a6ac4dc49bf8"} Jan 28 18:33:06 crc kubenswrapper[4767]: I0128 18:33:06.896242 4767 generic.go:334] "Generic (PLEG): container finished" podID="2e90df07-d5ed-42b7-a3d8-de62235b551d" containerID="c5528194888f5734c9ff748d5785546b4bd4f872f2299117d049fe6783556898" exitCode=0 Jan 28 18:33:06 crc kubenswrapper[4767]: I0128 18:33:06.896282 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfc9c" event={"ID":"2e90df07-d5ed-42b7-a3d8-de62235b551d","Type":"ContainerDied","Data":"c5528194888f5734c9ff748d5785546b4bd4f872f2299117d049fe6783556898"} Jan 28 18:33:06 crc kubenswrapper[4767]: I0128 18:33:06.897255 4767 generic.go:334] "Generic (PLEG): container finished" podID="eff110e4-7a33-4782-86e1-efff7c646e6f" containerID="644a56fa9825c6197d0fce32b243da8c744df96f93f670f6361ed5b660e19177" exitCode=0 Jan 28 18:33:06 crc kubenswrapper[4767]: I0128 18:33:06.897292 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qcqfz" event={"ID":"eff110e4-7a33-4782-86e1-efff7c646e6f","Type":"ContainerDied","Data":"644a56fa9825c6197d0fce32b243da8c744df96f93f670f6361ed5b660e19177"} Jan 28 18:33:06 crc kubenswrapper[4767]: I0128 18:33:06.898222 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/redhat-operators-ns2sx" event={"ID":"2d04606e-e735-4d65-b208-b39f04aa1630","Type":"ContainerStarted","Data":"b62777e57f4e819b3e489b8744ede348486bcb0bb56c497a3e1e7a9acf7cbe3d"} Jan 28 18:33:07 crc kubenswrapper[4767]: I0128 18:33:07.904714 4767 generic.go:334] "Generic (PLEG): container finished" podID="2d04606e-e735-4d65-b208-b39f04aa1630" containerID="b62777e57f4e819b3e489b8744ede348486bcb0bb56c497a3e1e7a9acf7cbe3d" exitCode=0 Jan 28 18:33:07 crc kubenswrapper[4767]: I0128 18:33:07.904811 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ns2sx" event={"ID":"2d04606e-e735-4d65-b208-b39f04aa1630","Type":"ContainerDied","Data":"b62777e57f4e819b3e489b8744ede348486bcb0bb56c497a3e1e7a9acf7cbe3d"} Jan 28 18:33:07 crc kubenswrapper[4767]: I0128 18:33:07.907005 4767 generic.go:334] "Generic (PLEG): container finished" podID="c91bd017-c929-4891-9118-95e20ef61238" containerID="7294ef78274eee2299c6db1498f7cf25c7f812d76c6873c3a73617b7d7f55bd2" exitCode=0 Jan 28 18:33:07 crc kubenswrapper[4767]: I0128 18:33:07.907072 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86rk5" event={"ID":"c91bd017-c929-4891-9118-95e20ef61238","Type":"ContainerDied","Data":"7294ef78274eee2299c6db1498f7cf25c7f812d76c6873c3a73617b7d7f55bd2"} Jan 28 18:33:15 crc kubenswrapper[4767]: I0128 18:33:15.455693 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:33:15 crc kubenswrapper[4767]: I0128 18:33:15.456020 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.168044 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.761437 4767 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 18:33:27 crc kubenswrapper[4767]: E0128 18:33:27.761864 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5367aa4-a0a8-481d-8205-4a73c4d4b6ba" containerName="pruner" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.761932 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5367aa4-a0a8-481d-8205-4a73c4d4b6ba" containerName="pruner" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.762138 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5367aa4-a0a8-481d-8205-4a73c4d4b6ba" containerName="pruner" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.762541 4767 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.762792 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.762919 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac" gracePeriod=15 Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.763031 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378" gracePeriod=15 Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.762992 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed" gracePeriod=15 Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.763137 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97" gracePeriod=15 Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.763065 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f" gracePeriod=15 Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.765882 4767 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 18:33:27 crc kubenswrapper[4767]: E0128 18:33:27.766315 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766336 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 28 18:33:27 crc kubenswrapper[4767]: E0128 18:33:27.766348 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766355 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 28 18:33:27 crc kubenswrapper[4767]: E0128 18:33:27.766368 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766393 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 18:33:27 crc kubenswrapper[4767]: E0128 18:33:27.766406 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" 
containerName="kube-apiserver-check-endpoints" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766412 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 18:33:27 crc kubenswrapper[4767]: E0128 18:33:27.766421 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766426 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 28 18:33:27 crc kubenswrapper[4767]: E0128 18:33:27.766439 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766445 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 28 18:33:27 crc kubenswrapper[4767]: E0128 18:33:27.766470 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766477 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 28 18:33:27 crc kubenswrapper[4767]: E0128 18:33:27.766487 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766493 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766674 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766707 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766714 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766722 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766732 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766742 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.766750 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.792494 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.792554 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.792587 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.792932 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.793048 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.793177 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.793373 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.793485 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: E0128 18:33:27.894195 4767 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.110:6443: connect: connection refused" 
pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" volumeName="registry-storage" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.894413 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.894754 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.894460 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.894805 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.894942 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.894992 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.894995 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.895020 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.895103 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.895139 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.895161 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.895263 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.895266 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.895272 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.895294 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:27 crc kubenswrapper[4767]: I0128 18:33:27.895391 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:28 crc kubenswrapper[4767]: I0128 18:33:28.266731 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 18:33:28 crc kubenswrapper[4767]: I0128 18:33:28.267584 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 18:33:28 crc kubenswrapper[4767]: I0128 18:33:28.268174 4767 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97" exitCode=2 Jan 28 18:33:29 crc kubenswrapper[4767]: I0128 18:33:29.276425 4767 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 18:33:29 crc kubenswrapper[4767]: I0128 18:33:29.278321 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 18:33:29 crc kubenswrapper[4767]: I0128 18:33:29.279460 4767 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed" exitCode=0 Jan 28 18:33:30 crc kubenswrapper[4767]: I0128 18:33:30.288173 4767 generic.go:334] "Generic (PLEG): container finished" podID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" containerID="b90f63be3d81542cb75fc09352d840c32adb09baa2eada9a62f02aa5370bcd8a" exitCode=0 Jan 28 18:33:30 crc kubenswrapper[4767]: I0128 18:33:30.288284 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"dc3fefa4-19b5-4725-9a82-ab64b0c735f5","Type":"ContainerDied","Data":"b90f63be3d81542cb75fc09352d840c32adb09baa2eada9a62f02aa5370bcd8a"} Jan 28 18:33:30 crc kubenswrapper[4767]: I0128 18:33:30.289497 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:30 crc kubenswrapper[4767]: I0128 18:33:30.291279 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Jan 28 18:33:30 crc kubenswrapper[4767]: I0128 18:33:30.292718 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 18:33:30 crc kubenswrapper[4767]: I0128 18:33:30.293459 4767 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac" exitCode=0 Jan 28 18:33:30 crc kubenswrapper[4767]: I0128 18:33:30.293491 4767 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f" exitCode=0 Jan 28 18:33:30 crc kubenswrapper[4767]: I0128 18:33:30.293549 4767 scope.go:117] "RemoveContainer" containerID="abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce" Jan 28 18:33:31 crc kubenswrapper[4767]: E0128 18:33:31.010080 4767 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:31 crc kubenswrapper[4767]: E0128 18:33:31.010653 4767 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:31 crc kubenswrapper[4767]: E0128 18:33:31.010902 4767 controller.go:195] "Failed to update lease" err="Put 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:31 crc kubenswrapper[4767]: E0128 18:33:31.011128 4767 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:31 crc kubenswrapper[4767]: E0128 18:33:31.011402 4767 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:31 crc kubenswrapper[4767]: I0128 18:33:31.011461 4767 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 28 18:33:31 crc kubenswrapper[4767]: E0128 18:33:31.011656 4767 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="200ms" Jan 28 18:33:31 crc kubenswrapper[4767]: E0128 18:33:31.212641 4767 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="400ms" Jan 28 18:33:31 crc kubenswrapper[4767]: E0128 18:33:31.615790 4767 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="800ms" Jan 28 18:33:32 crc kubenswrapper[4767]: E0128 18:33:32.417661 4767 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="1.6s" Jan 28 18:33:32 crc kubenswrapper[4767]: E0128 18:33:32.804037 4767 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:32 crc kubenswrapper[4767]: I0128 18:33:32.804463 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:33:33 crc kubenswrapper[4767]: I0128 18:33:33.309648 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 28 18:33:33 crc kubenswrapper[4767]: I0128 18:33:33.310771 4767 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378" exitCode=0 Jan 28 18:33:34 crc kubenswrapper[4767]: E0128 18:33:34.019101 4767 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="3.2s" Jan 28 18:33:34 crc kubenswrapper[4767]: I0128 18:33:34.797974 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:37 crc kubenswrapper[4767]: E0128 18:33:37.220198 4767 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="6.4s" Jan 28 18:33:41 crc kubenswrapper[4767]: I0128 18:33:41.411742 4767 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Liveness probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 28 18:33:41 crc kubenswrapper[4767]: I0128 18:33:41.412183 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 28 18:33:41 crc kubenswrapper[4767]: E0128 18:33:41.413244 4767 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/events\": dial tcp 38.102.83.110:6443: connect: connection refused" event=< Jan 28 18:33:41 crc kubenswrapper[4767]: &Event{ObjectMeta:{kube-controller-manager-crc.188ef8c353d81035 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:ProbeError,Message:Liveness probe error: Get "https://192.168.126.11:10257/healthz": dial tcp 192.168.126.11:10257: connect: connection refused Jan 28 18:33:41 crc kubenswrapper[4767]: body: Jan 28 18:33:41 crc kubenswrapper[4767]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 18:33:41.412163637 +0000 UTC m=+227.376346511,LastTimestamp:2026-01-28 18:33:41.412163637 +0000 UTC 
m=+227.376346511,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 28 18:33:41 crc kubenswrapper[4767]: > Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.265945 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.267508 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.368843 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"dc3fefa4-19b5-4725-9a82-ab64b0c735f5","Type":"ContainerDied","Data":"48bf93c1af011fac9c618943a309c334ea8cf039b775ff85d20b96c515a89629"} Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.368877 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.368894 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="48bf93c1af011fac9c618943a309c334ea8cf039b775ff85d20b96c515a89629" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.371506 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.371654 4767 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2" exitCode=1 Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.371836 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2"} Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.372967 4767 scope.go:117] "RemoveContainer" containerID="b2e8990e2a16c2ecf46bba7ab3799bddbb7fc54ca9c7d50c043fb608ed9720a2" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.373281 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.373550 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.400499 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: 
\"kubernetes.io/projected/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-kube-api-access\") pod \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\" (UID: \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\") " Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.400657 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-kubelet-dir\") pod \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\" (UID: \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\") " Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.400731 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-var-lock\") pod \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\" (UID: \"dc3fefa4-19b5-4725-9a82-ab64b0c735f5\") " Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.400817 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "dc3fefa4-19b5-4725-9a82-ab64b0c735f5" (UID: "dc3fefa4-19b5-4725-9a82-ab64b0c735f5"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.400908 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-var-lock" (OuterVolumeSpecName: "var-lock") pod "dc3fefa4-19b5-4725-9a82-ab64b0c735f5" (UID: "dc3fefa4-19b5-4725-9a82-ab64b0c735f5"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.401091 4767 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.419886 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "dc3fefa4-19b5-4725-9a82-ab64b0c735f5" (UID: "dc3fefa4-19b5-4725-9a82-ab64b0c735f5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.503010 4767 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-var-lock\") on node \"crc\" DevicePath \"\"" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.503072 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/dc3fefa4-19b5-4725-9a82-ab64b0c735f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.690130 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:42 crc kubenswrapper[4767]: I0128 18:33:42.690757 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:43 crc kubenswrapper[4767]: E0128 18:33:43.621333 4767 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="7s" Jan 28 18:33:44 crc kubenswrapper[4767]: I0128 18:33:44.798643 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:44 crc kubenswrapper[4767]: I0128 18:33:44.799509 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:45 crc kubenswrapper[4767]: I0128 18:33:45.454958 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:33:45 crc kubenswrapper[4767]: I0128 18:33:45.455050 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:33:45 crc kubenswrapper[4767]: I0128 18:33:45.455137 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:33:45 crc kubenswrapper[4767]: I0128 18:33:45.456151 4767 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 18:33:45 crc kubenswrapper[4767]: I0128 18:33:45.456259 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753" gracePeriod=600 Jan 28 18:33:46 crc kubenswrapper[4767]: E0128 18:33:46.998853 4767 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf729b63a_09d0_4095_add6_3e40fbd43e1c.slice/crio-conmon-11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753.scope\": RecentStats: unable to find data in memory cache]" Jan 28 18:33:47 crc kubenswrapper[4767]: I0128 18:33:47.403437 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753" exitCode=0 Jan 28 18:33:47 crc kubenswrapper[4767]: I0128 18:33:47.403535 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753"} Jan 28 18:33:48 crc kubenswrapper[4767]: I0128 18:33:48.288475 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 18:33:48 crc kubenswrapper[4767]: E0128 18:33:48.940970 4767 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/events\": dial tcp 38.102.83.110:6443: connect: connection refused" event=< Jan 28 18:33:48 crc kubenswrapper[4767]: &Event{ObjectMeta:{kube-controller-manager-crc.188ef8c353d81035 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:ProbeError,Message:Liveness probe error: Get "https://192.168.126.11:10257/healthz": dial tcp 192.168.126.11:10257: connect: connection refused Jan 28 18:33:48 crc kubenswrapper[4767]: body: Jan 28 18:33:48 crc kubenswrapper[4767]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 18:33:41.412163637 +0000 UTC m=+227.376346511,LastTimestamp:2026-01-28 18:33:41.412163637 +0000 UTC m=+227.376346511,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 28 18:33:48 crc kubenswrapper[4767]: > Jan 28 18:33:49 crc kubenswrapper[4767]: I0128 18:33:49.251143 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 18:33:50 crc 
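[Note: the entries above record a liveness-probe failure on machine-config-daemon (GET http://127.0.0.1:8798/health refused) followed by the kubelet killing the container for restart. A minimal illustrative Go sketch of an HTTP probe of the same shape follows; it is not kubelet's actual prober, and the helper name probeOnce is hypothetical.]

package main

import (
	"fmt"
	"net/http"
	"time"
)

// probeOnce performs one HTTP GET health check, the same shape as the
// liveness probe in the log above. Illustrative sketch only.
func probeOnce(url string) error {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(url)
	if err != nil {
		// With no listener on the port this returns
		// "connect: connection refused", as recorded in the log.
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return fmt.Errorf("unhealthy status: %d", resp.StatusCode)
	}
	return nil
}

func main() {
	if err := probeOnce("http://127.0.0.1:8798/health"); err != nil {
		fmt.Println("Liveness probe failed:", err)
	}
}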
Jan 28 18:33:50 crc kubenswrapper[4767]: E0128 18:33:50.622402 4767 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="7s"
Jan 28 18:33:51 crc kubenswrapper[4767]: I0128 18:33:51.412292 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.026835 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.027835 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.028381 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.028558 4767 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.028913 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.038338 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.038440 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.038492 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") "
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.038433 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.038455 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.038662 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.038853 4767 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\""
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.038886 4767 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\""
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.038897 4767 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\""
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.442344 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log"
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.442951 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.458788 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.458944 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.459189 4767 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:33:52 crc kubenswrapper[4767]: I0128 18:33:52.801371 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes"
Jan 28 18:33:54 crc kubenswrapper[4767]: I0128 18:33:54.798539 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:33:54 crc kubenswrapper[4767]: I0128 18:33:54.799467 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:33:57 crc kubenswrapper[4767]: E0128 18:33:57.623863 4767 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.110:6443: connect: connection refused" interval="7s"
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:33:57 crc kubenswrapper[4767]: I0128 18:33:57.796113 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:57 crc kubenswrapper[4767]: I0128 18:33:57.796927 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:33:57 crc kubenswrapper[4767]: I0128 18:33:57.817033 4767 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="99bd7dda-cedd-4898-920b-f77c5b0dd10e" Jan 28 18:33:57 crc kubenswrapper[4767]: I0128 18:33:57.817073 4767 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="99bd7dda-cedd-4898-920b-f77c5b0dd10e" Jan 28 18:33:57 crc kubenswrapper[4767]: E0128 18:33:57.817799 4767 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:33:57 crc kubenswrapper[4767]: I0128 18:33:57.818651 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:33:58 crc kubenswrapper[4767]: E0128 18:33:58.942568 4767 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/events\": dial tcp 38.102.83.110:6443: connect: connection refused" event=< Jan 28 18:33:58 crc kubenswrapper[4767]: &Event{ObjectMeta:{kube-controller-manager-crc.188ef8c353d81035 openshift-kube-controller-manager 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-controller-manager,Name:kube-controller-manager-crc,UID:f614b9022728cf315e60c057852e563e,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{kube-controller-manager},},Reason:ProbeError,Message:Liveness probe error: Get "https://192.168.126.11:10257/healthz": dial tcp 192.168.126.11:10257: connect: connection refused Jan 28 18:33:58 crc kubenswrapper[4767]: body: Jan 28 18:33:58 crc kubenswrapper[4767]: ,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-28 18:33:41.412163637 +0000 UTC m=+227.376346511,LastTimestamp:2026-01-28 18:33:41.412163637 +0000 UTC m=+227.376346511,Count:1,Type:Warning,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,} Jan 28 18:33:58 crc kubenswrapper[4767]: > Jan 28 18:34:00 crc kubenswrapper[4767]: I0128 18:34:00.014688 4767 scope.go:117] "RemoveContainer" containerID="5a1828de2cde56fee37d73cfe7663329d5bb9521c68006adf9848b6d9ef16eac" Jan 28 18:34:02 crc kubenswrapper[4767]: I0128 18:34:02.947929 4767 scope.go:117] "RemoveContainer" containerID="abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce" Jan 28 
18:34:02 crc kubenswrapper[4767]: E0128 18:34:02.948859 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\": container with ID starting with abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce not found: ID does not exist" containerID="abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce" Jan 28 18:34:02 crc kubenswrapper[4767]: I0128 18:34:02.948903 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce"} err="failed to get container status \"abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\": rpc error: code = NotFound desc = could not find container \"abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce\": container with ID starting with abca3bbfa2f4d6ae993489763ca4f697d1c5b6f3fb66fb4d00db93641f4d2bce not found: ID does not exist" Jan 28 18:34:02 crc kubenswrapper[4767]: I0128 18:34:02.948928 4767 scope.go:117] "RemoveContainer" containerID="1e00f3930861e2c8b10c525045349837932b8dfbafb9c2393b13f9c19a2707ed" Jan 28 18:34:02 crc kubenswrapper[4767]: W0128 18:34:02.954501 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-169df14cca312b028145bdd03f24d0dc9ec52933cec6419cb820ce0651edffd8 WatchSource:0}: Error finding container 169df14cca312b028145bdd03f24d0dc9ec52933cec6419cb820ce0651edffd8: Status 404 returned error can't find the container with id 169df14cca312b028145bdd03f24d0dc9ec52933cec6419cb820ce0651edffd8 Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.058952 4767 scope.go:117] "RemoveContainer" containerID="6ad40f56703d2f4d1a9c9d72916ca846318e8612d7c0ccca25228a8fc647220f" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.099332 4767 scope.go:117] "RemoveContainer" containerID="674aeb104a4991a8591a83a59864688be36d915e03774b941280b4704b938b97" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.170485 4767 scope.go:117] "RemoveContainer" containerID="638b3ccc93d634bc5bb44dc3c6dfafa7cb070840cc53105dfa45531d7922a378" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.195821 4767 scope.go:117] "RemoveContainer" containerID="7e25b1dc9ecb628aaea9f1174ad25a9ad3278223c54ab9bd72e10ef9230a7908" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.519301 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86rk5" event={"ID":"c91bd017-c929-4891-9118-95e20ef61238","Type":"ContainerStarted","Data":"87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8"} Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.520343 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.520588 4767 status_manager.go:851] "Failed to get status for pod" podUID="c91bd017-c929-4891-9118-95e20ef61238" pod="openshift-marketplace/certified-operators-86rk5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-86rk5\": dial tcp 
38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.520813 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.522264 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gft5f" event={"ID":"daa183e4-f49d-4f7d-9f9b-66e42f869297","Type":"ContainerStarted","Data":"7f9ecf1abf37507d1245723219e7a04a604631a258966f076ba2ff0dcdded938"} Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.523229 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.523799 4767 status_manager.go:851] "Failed to get status for pod" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" pod="openshift-marketplace/certified-operators-gft5f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gft5f\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.524167 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.524508 4767 status_manager.go:851] "Failed to get status for pod" podUID="c91bd017-c929-4891-9118-95e20ef61238" pod="openshift-marketplace/certified-operators-86rk5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-86rk5\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.527031 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ns2sx" event={"ID":"2d04606e-e735-4d65-b208-b39f04aa1630","Type":"ContainerStarted","Data":"eb0d1fcb7ddc7bad27ea096801c2acc3a5fccc1bcb1f584bed5928929edd6d3d"} Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.527853 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.528165 4767 status_manager.go:851] "Failed to get status for pod" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" pod="openshift-marketplace/redhat-operators-ns2sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ns2sx\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.528442 4767 status_manager.go:851] "Failed to get status for pod" podUID="c91bd017-c929-4891-9118-95e20ef61238" pod="openshift-marketplace/certified-operators-86rk5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-86rk5\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.528695 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.528957 4767 status_manager.go:851] "Failed to get status for pod" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" pod="openshift-marketplace/certified-operators-gft5f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gft5f\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.533740 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lhmz" event={"ID":"8d36dd2c-a6e9-4369-aae5-c657695233a5","Type":"ContainerStarted","Data":"d2c3c4c294e1f04ffbbb5488a7c5de85b971fc91ff8105e19d696958350c523c"}
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.534602 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.534852 4767 status_manager.go:851] "Failed to get status for pod" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" pod="openshift-marketplace/certified-operators-gft5f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gft5f\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.535103 4767 status_manager.go:851] "Failed to get status for pod" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" pod="openshift-marketplace/community-operators-4lhmz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4lhmz\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.535415 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.536075 4767 status_manager.go:851] "Failed to get status for pod" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" pod="openshift-marketplace/redhat-operators-ns2sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ns2sx\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.536334 4767 status_manager.go:851] "Failed to get status for pod" podUID="c91bd017-c929-4891-9118-95e20ef61238" pod="openshift-marketplace/certified-operators-86rk5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-86rk5\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.537821 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6w58h" event={"ID":"b0ee47b3-df01-4b36-8d19-ead3db6a705d","Type":"ContainerStarted","Data":"b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175"}
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.539234 4767 status_manager.go:851] "Failed to get status for pod" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" pod="openshift-marketplace/certified-operators-gft5f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gft5f\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.539567 4767 status_manager.go:851] "Failed to get status for pod" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" pod="openshift-marketplace/community-operators-4lhmz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4lhmz\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.539788 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.540007 4767 status_manager.go:851] "Failed to get status for pod" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" pod="openshift-marketplace/redhat-operators-ns2sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ns2sx\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.540120 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"b576c8e60ec56d24f4fa773e84c82ca50182158dea49adb0de3372c3d2f9d721"}
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.540163 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"169df14cca312b028145bdd03f24d0dc9ec52933cec6419cb820ce0651edffd8"}
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.540243 4767 status_manager.go:851] "Failed to get status for pod" podUID="c91bd017-c929-4891-9118-95e20ef61238" pod="openshift-marketplace/certified-operators-86rk5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-86rk5\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.540487 4767 status_manager.go:851] "Failed to get status for pod" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" pod="openshift-marketplace/community-operators-6w58h" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6w58h\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: E0128 18:34:03.540628 4767 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.540670 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.540925 4767 status_manager.go:851] "Failed to get status for pod" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" pod="openshift-marketplace/certified-operators-gft5f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gft5f\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.541115 4767 status_manager.go:851] "Failed to get status for pod" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" pod="openshift-marketplace/community-operators-4lhmz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4lhmz\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.541393 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.541780 4767 status_manager.go:851] "Failed to get status for pod" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" pod="openshift-marketplace/redhat-operators-ns2sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ns2sx\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.542108 4767 status_manager.go:851] "Failed to get status for pod" podUID="c91bd017-c929-4891-9118-95e20ef61238" pod="openshift-marketplace/certified-operators-86rk5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-86rk5\": dial tcp 38.102.83.110:6443: connect: connection refused"
Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.542383 4767 status_manager.go:851] "Failed to get status for pod" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" pod="openshift-marketplace/community-operators-6w58h" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6w58h\": dial tcp 38.102.83.110:6443: connect: connection refused"
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.542624 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k5t94" event={"ID":"3482059d-fb54-44c1-8fd4-b4ca29e633e4","Type":"ContainerStarted","Data":"abce2e2a228b450d143edb1f5d104ee5c697c98b65ac8b76f3b735b8d5161483"} Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.543288 4767 status_manager.go:851] "Failed to get status for pod" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" pod="openshift-marketplace/redhat-operators-ns2sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ns2sx\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.543524 4767 status_manager.go:851] "Failed to get status for pod" podUID="c91bd017-c929-4891-9118-95e20ef61238" pod="openshift-marketplace/certified-operators-86rk5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-86rk5\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.543803 4767 status_manager.go:851] "Failed to get status for pod" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" pod="openshift-marketplace/community-operators-6w58h" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6w58h\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.544128 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.544428 4767 status_manager.go:851] "Failed to get status for pod" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" pod="openshift-marketplace/certified-operators-gft5f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gft5f\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.544708 4767 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="2b3d748260e2e127e879f2773b0503bea1d94e040376d26651117bfc6dbb23d5" exitCode=0 Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.544715 4767 status_manager.go:851] "Failed to get status for pod" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" pod="openshift-marketplace/redhat-operators-k5t94" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-k5t94\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.544776 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"2b3d748260e2e127e879f2773b0503bea1d94e040376d26651117bfc6dbb23d5"} Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.544813 4767 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"17e32049096bcf4b330f0656329a8ec8c91afc41ed99e56bd4849cf3ff7827aa"} Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.544981 4767 status_manager.go:851] "Failed to get status for pod" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" pod="openshift-marketplace/community-operators-4lhmz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4lhmz\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.545150 4767 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="99bd7dda-cedd-4898-920b-f77c5b0dd10e" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.545171 4767 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="99bd7dda-cedd-4898-920b-f77c5b0dd10e" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.545300 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: E0128 18:34:03.545418 4767 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.545664 4767 status_manager.go:851] "Failed to get status for pod" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" pod="openshift-marketplace/redhat-operators-k5t94" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-k5t94\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.545999 4767 status_manager.go:851] "Failed to get status for pod" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" pod="openshift-marketplace/community-operators-4lhmz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4lhmz\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.546431 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.546660 4767 status_manager.go:851] "Failed to get status for pod" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" pod="openshift-marketplace/redhat-operators-ns2sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ns2sx\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.546867 4767 status_manager.go:851] "Failed to get status for pod" podUID="c91bd017-c929-4891-9118-95e20ef61238" 
pod="openshift-marketplace/certified-operators-86rk5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-86rk5\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.547105 4767 status_manager.go:851] "Failed to get status for pod" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" pod="openshift-marketplace/community-operators-6w58h" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6w58h\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.547317 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.547541 4767 status_manager.go:851] "Failed to get status for pod" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" pod="openshift-marketplace/certified-operators-gft5f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gft5f\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.548152 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfc9c" event={"ID":"2e90df07-d5ed-42b7-a3d8-de62235b551d","Type":"ContainerStarted","Data":"ab4b960f985f27e5759720d30b4623a6661805ceed0e606c66a33e102b37cf94"} Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.549056 4767 status_manager.go:851] "Failed to get status for pod" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" pod="openshift-marketplace/redhat-operators-ns2sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ns2sx\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.549245 4767 status_manager.go:851] "Failed to get status for pod" podUID="c91bd017-c929-4891-9118-95e20ef61238" pod="openshift-marketplace/certified-operators-86rk5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-86rk5\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.549404 4767 status_manager.go:851] "Failed to get status for pod" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" pod="openshift-marketplace/community-operators-6w58h" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6w58h\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.549736 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.549952 4767 status_manager.go:851] "Failed to get status for pod" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" 
pod="openshift-marketplace/certified-operators-gft5f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gft5f\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.550140 4767 status_manager.go:851] "Failed to get status for pod" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" pod="openshift-marketplace/redhat-marketplace-bfc9c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bfc9c\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.550340 4767 status_manager.go:851] "Failed to get status for pod" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" pod="openshift-marketplace/redhat-operators-k5t94" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-k5t94\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.550527 4767 status_manager.go:851] "Failed to get status for pod" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" pod="openshift-marketplace/community-operators-4lhmz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4lhmz\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.550708 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.552283 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qcqfz" event={"ID":"eff110e4-7a33-4782-86e1-efff7c646e6f","Type":"ContainerStarted","Data":"78c683f03e61ce4c8ade51936d01fcc138f7aaf94c69a75adb014d646d82705e"} Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.552745 4767 status_manager.go:851] "Failed to get status for pod" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" pod="openshift-marketplace/certified-operators-gft5f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gft5f\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.552916 4767 status_manager.go:851] "Failed to get status for pod" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" pod="openshift-marketplace/redhat-marketplace-bfc9c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bfc9c\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.553141 4767 status_manager.go:851] "Failed to get status for pod" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" pod="openshift-marketplace/redhat-operators-k5t94" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-k5t94\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.556824 4767 status_manager.go:851] "Failed to get status for pod" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" pod="openshift-marketplace/community-operators-4lhmz" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4lhmz\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.557094 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.557633 4767 status_manager.go:851] "Failed to get status for pod" podUID="eff110e4-7a33-4782-86e1-efff7c646e6f" pod="openshift-marketplace/redhat-marketplace-qcqfz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qcqfz\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.557851 4767 status_manager.go:851] "Failed to get status for pod" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" pod="openshift-marketplace/redhat-operators-ns2sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ns2sx\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.558096 4767 status_manager.go:851] "Failed to get status for pod" podUID="c91bd017-c929-4891-9118-95e20ef61238" pod="openshift-marketplace/certified-operators-86rk5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-86rk5\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.558421 4767 status_manager.go:851] "Failed to get status for pod" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" pod="openshift-marketplace/community-operators-6w58h" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6w58h\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.558902 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.560037 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.560126 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"55d1c3692fe4c93fd1be343b9271c59c1ba6491c6ff68bd8a5eb29cab41a5a52"} Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.561838 4767 status_manager.go:851] "Failed to get status for pod" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" pod="openshift-marketplace/community-operators-6w58h" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6w58h\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 
18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.562176 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.562419 4767 status_manager.go:851] "Failed to get status for pod" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" pod="openshift-marketplace/certified-operators-gft5f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gft5f\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.562794 4767 status_manager.go:851] "Failed to get status for pod" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" pod="openshift-marketplace/redhat-marketplace-bfc9c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bfc9c\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.563051 4767 status_manager.go:851] "Failed to get status for pod" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" pod="openshift-marketplace/redhat-operators-k5t94" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-k5t94\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.563509 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.563894 4767 status_manager.go:851] "Failed to get status for pod" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" pod="openshift-marketplace/community-operators-4lhmz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4lhmz\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.564119 4767 status_manager.go:851] "Failed to get status for pod" podUID="eff110e4-7a33-4782-86e1-efff7c646e6f" pod="openshift-marketplace/redhat-marketplace-qcqfz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qcqfz\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.564355 4767 status_manager.go:851] "Failed to get status for pod" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" pod="openshift-marketplace/redhat-operators-ns2sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ns2sx\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.564621 4767 status_manager.go:851] "Failed to get status for pod" podUID="c91bd017-c929-4891-9118-95e20ef61238" pod="openshift-marketplace/certified-operators-86rk5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-86rk5\": dial tcp 38.102.83.110:6443: connect: 
connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.566625 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"34c635ea69766969f7f4b7e505bfdf39a502ec67ed786aa17cc6f9134cc202ab"} Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.567387 4767 status_manager.go:851] "Failed to get status for pod" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.567604 4767 status_manager.go:851] "Failed to get status for pod" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" pod="openshift-marketplace/community-operators-4lhmz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-4lhmz\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.567762 4767 status_manager.go:851] "Failed to get status for pod" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/pods/machine-config-daemon-skvzp\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.567916 4767 status_manager.go:851] "Failed to get status for pod" podUID="eff110e4-7a33-4782-86e1-efff7c646e6f" pod="openshift-marketplace/redhat-marketplace-qcqfz" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-qcqfz\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.568053 4767 status_manager.go:851] "Failed to get status for pod" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" pod="openshift-marketplace/redhat-operators-ns2sx" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-ns2sx\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.568192 4767 status_manager.go:851] "Failed to get status for pod" podUID="c91bd017-c929-4891-9118-95e20ef61238" pod="openshift-marketplace/certified-operators-86rk5" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-86rk5\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.568357 4767 status_manager.go:851] "Failed to get status for pod" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" pod="openshift-marketplace/community-operators-6w58h" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-6w58h\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.568505 4767 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.110:6443: connect: 
connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.568669 4767 status_manager.go:851] "Failed to get status for pod" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" pod="openshift-marketplace/certified-operators-gft5f" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-gft5f\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.568816 4767 status_manager.go:851] "Failed to get status for pod" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" pod="openshift-marketplace/redhat-marketplace-bfc9c" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-bfc9c\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:03 crc kubenswrapper[4767]: I0128 18:34:03.569005 4767 status_manager.go:851] "Failed to get status for pod" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" pod="openshift-marketplace/redhat-operators-k5t94" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-k5t94\": dial tcp 38.102.83.110:6443: connect: connection refused" Jan 28 18:34:04 crc kubenswrapper[4767]: I0128 18:34:04.573234 4767 generic.go:334] "Generic (PLEG): container finished" podID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" containerID="abce2e2a228b450d143edb1f5d104ee5c697c98b65ac8b76f3b735b8d5161483" exitCode=0 Jan 28 18:34:04 crc kubenswrapper[4767]: I0128 18:34:04.573465 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k5t94" event={"ID":"3482059d-fb54-44c1-8fd4-b4ca29e633e4","Type":"ContainerDied","Data":"abce2e2a228b450d143edb1f5d104ee5c697c98b65ac8b76f3b735b8d5161483"} Jan 28 18:34:04 crc kubenswrapper[4767]: I0128 18:34:04.579594 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3658aad3f863b44a33d686386cad3cc9c3c11a9223cf28dd0cd9349f3a8cd8d6"} Jan 28 18:34:04 crc kubenswrapper[4767]: I0128 18:34:04.579646 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"10337c83dcb0b7ffe789cf46f3507c6405f31a5b42c07a60a5b90fb33ae9b277"} Jan 28 18:34:04 crc kubenswrapper[4767]: I0128 18:34:04.579659 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"a7f57c402c896ec245f2b26e2e16837c0b384c128103df73fbd0005497d55c56"} Jan 28 18:34:04 crc kubenswrapper[4767]: I0128 18:34:04.588763 4767 generic.go:334] "Generic (PLEG): container finished" podID="8d36dd2c-a6e9-4369-aae5-c657695233a5" containerID="d2c3c4c294e1f04ffbbb5488a7c5de85b971fc91ff8105e19d696958350c523c" exitCode=0 Jan 28 18:34:04 crc kubenswrapper[4767]: I0128 18:34:04.588887 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lhmz" event={"ID":"8d36dd2c-a6e9-4369-aae5-c657695233a5","Type":"ContainerDied","Data":"d2c3c4c294e1f04ffbbb5488a7c5de85b971fc91ff8105e19d696958350c523c"} Jan 28 18:34:06 crc kubenswrapper[4767]: I0128 18:34:06.626937 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"2bfcd59ff223d192cfc0dc471c09c2b630a215cc0f6cb28b0245a7dd829c7656"} Jan 28 18:34:06 crc kubenswrapper[4767]: I0128 18:34:06.627314 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5aedc4569f88cba7e91fd839fb9c153ba7bd1f07cc5f7ea5518e6158256a84d1"} Jan 28 18:34:06 crc kubenswrapper[4767]: I0128 18:34:06.627588 4767 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="99bd7dda-cedd-4898-920b-f77c5b0dd10e" Jan 28 18:34:06 crc kubenswrapper[4767]: I0128 18:34:06.627619 4767 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="99bd7dda-cedd-4898-920b-f77c5b0dd10e" Jan 28 18:34:06 crc kubenswrapper[4767]: I0128 18:34:06.627841 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:34:07 crc kubenswrapper[4767]: I0128 18:34:07.633676 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k5t94" event={"ID":"3482059d-fb54-44c1-8fd4-b4ca29e633e4","Type":"ContainerStarted","Data":"3af210b9d5bf2d5a5b6e53610ae2407a11a993242853daf544c28a7001212a54"} Jan 28 18:34:07 crc kubenswrapper[4767]: I0128 18:34:07.635446 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lhmz" event={"ID":"8d36dd2c-a6e9-4369-aae5-c657695233a5","Type":"ContainerStarted","Data":"c63fbac1803dff0e8269dd74bb8c36afbc0863bdf1249d244fc4e1c380fedd94"} Jan 28 18:34:07 crc kubenswrapper[4767]: I0128 18:34:07.819272 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:34:07 crc kubenswrapper[4767]: I0128 18:34:07.819343 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:34:07 crc kubenswrapper[4767]: I0128 18:34:07.828760 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:34:08 crc kubenswrapper[4767]: I0128 18:34:08.288852 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 18:34:08 crc kubenswrapper[4767]: I0128 18:34:08.293030 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 18:34:08 crc kubenswrapper[4767]: I0128 18:34:08.641025 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 18:34:09 crc kubenswrapper[4767]: I0128 18:34:09.831290 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:34:09 crc kubenswrapper[4767]: I0128 18:34:09.831346 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.005132 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.005225 4767 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.436008 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.436466 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.486668 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.486956 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.510147 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.510498 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.568343 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.577746 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.702450 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.711477 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:34:10 crc kubenswrapper[4767]: I0128 18:34:10.727964 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:34:11 crc kubenswrapper[4767]: I0128 18:34:11.638506 4767 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:34:11 crc kubenswrapper[4767]: I0128 18:34:11.695147 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:34:11 crc kubenswrapper[4767]: I0128 18:34:11.848158 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:34:11 crc kubenswrapper[4767]: I0128 18:34:11.848227 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:34:11 crc kubenswrapper[4767]: I0128 18:34:11.886560 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.229539 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.229586 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.286414 
4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.662324 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-network-node-identity_network-node-identity-vrzqb_ef543e1b-8068-4ea3-b32a-61027b32e95d/approver/0.log" Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.662664 4767 generic.go:334] "Generic (PLEG): container finished" podID="ef543e1b-8068-4ea3-b32a-61027b32e95d" containerID="01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1" exitCode=1 Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.662807 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerDied","Data":"01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1"} Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.663242 4767 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="99bd7dda-cedd-4898-920b-f77c5b0dd10e" Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.663257 4767 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="99bd7dda-cedd-4898-920b-f77c5b0dd10e" Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.663982 4767 scope.go:117] "RemoveContainer" containerID="01ed7cd6b38359f3dc858ff1c18e27d392e8e577097a6d29d32dddcfb1bacba1" Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.674335 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.698015 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="db72fd58-c2c5-44d5-89f7-d27c072a536e" Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.729560 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:34:12 crc kubenswrapper[4767]: I0128 18:34:12.729636 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:34:13 crc kubenswrapper[4767]: I0128 18:34:13.210482 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ns2sx" Jan 28 18:34:13 crc kubenswrapper[4767]: I0128 18:34:13.210542 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ns2sx" Jan 28 18:34:13 crc kubenswrapper[4767]: I0128 18:34:13.251274 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ns2sx" Jan 28 18:34:13 crc kubenswrapper[4767]: I0128 18:34:13.645236 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-k5t94" Jan 28 18:34:13 crc kubenswrapper[4767]: I0128 18:34:13.645286 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-k5t94" Jan 28 18:34:13 crc kubenswrapper[4767]: I0128 18:34:13.670380 4767 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-network-node-identity_network-node-identity-vrzqb_ef543e1b-8068-4ea3-b32a-61027b32e95d/approver/0.log" Jan 28 18:34:13 crc kubenswrapper[4767]: I0128 18:34:13.670772 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"b69770743856702b134367efb6cc6304889d03a3b0fd25e6b7db0ea8e1ae2953"} Jan 28 18:34:13 crc kubenswrapper[4767]: I0128 18:34:13.672350 4767 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="99bd7dda-cedd-4898-920b-f77c5b0dd10e" Jan 28 18:34:13 crc kubenswrapper[4767]: I0128 18:34:13.672373 4767 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="99bd7dda-cedd-4898-920b-f77c5b0dd10e" Jan 28 18:34:13 crc kubenswrapper[4767]: I0128 18:34:13.691299 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-k5t94" Jan 28 18:34:13 crc kubenswrapper[4767]: I0128 18:34:13.723746 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ns2sx" Jan 28 18:34:13 crc kubenswrapper[4767]: I0128 18:34:13.736177 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-k5t94" Jan 28 18:34:14 crc kubenswrapper[4767]: I0128 18:34:14.838635 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="db72fd58-c2c5-44d5-89f7-d27c072a536e" Jan 28 18:34:19 crc kubenswrapper[4767]: I0128 18:34:19.255333 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 28 18:34:33 crc kubenswrapper[4767]: I0128 18:34:33.651674 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 28 18:34:33 crc kubenswrapper[4767]: I0128 18:34:33.770702 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 28 18:34:33 crc kubenswrapper[4767]: I0128 18:34:33.914243 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 28 18:34:34 crc kubenswrapper[4767]: I0128 18:34:34.301506 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 28 18:34:34 crc kubenswrapper[4767]: I0128 18:34:34.807461 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 28 18:34:36 crc kubenswrapper[4767]: I0128 18:34:36.387284 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 28 18:34:36 crc kubenswrapper[4767]: I0128 18:34:36.651377 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 28 18:34:36 crc kubenswrapper[4767]: I0128 18:34:36.994810 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 28 18:34:37 crc kubenswrapper[4767]: I0128 18:34:37.244340 4767 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 28 18:34:37 crc kubenswrapper[4767]: I0128 18:34:37.549548 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 28 18:34:37 crc kubenswrapper[4767]: I0128 18:34:37.627150 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 28 18:34:38 crc kubenswrapper[4767]: I0128 18:34:38.047491 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 28 18:34:38 crc kubenswrapper[4767]: I0128 18:34:38.208303 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 28 18:34:38 crc kubenswrapper[4767]: I0128 18:34:38.471111 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 28 18:34:38 crc kubenswrapper[4767]: I0128 18:34:38.576055 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 28 18:34:38 crc kubenswrapper[4767]: I0128 18:34:38.578722 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 28 18:34:38 crc kubenswrapper[4767]: I0128 18:34:38.802852 4767 generic.go:334] "Generic (PLEG): container finished" podID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerID="533e08262d8663a6db96c61b4deefb0a798033bb5ec7413e4e044b61c4d094f0" exitCode=0 Jan 28 18:34:38 crc kubenswrapper[4767]: I0128 18:34:38.802914 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" event={"ID":"b57f864b-8ab2-499a-a47e-b4a4c62842e7","Type":"ContainerDied","Data":"533e08262d8663a6db96c61b4deefb0a798033bb5ec7413e4e044b61c4d094f0"} Jan 28 18:34:38 crc kubenswrapper[4767]: I0128 18:34:38.803466 4767 scope.go:117] "RemoveContainer" containerID="533e08262d8663a6db96c61b4deefb0a798033bb5ec7413e4e044b61c4d094f0" Jan 28 18:34:39 crc kubenswrapper[4767]: I0128 18:34:39.313012 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 28 18:34:39 crc kubenswrapper[4767]: I0128 18:34:39.483757 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 28 18:34:39 crc kubenswrapper[4767]: I0128 18:34:39.486312 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 28 18:34:39 crc kubenswrapper[4767]: I0128 18:34:39.659282 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 28 18:34:39 crc kubenswrapper[4767]: I0128 18:34:39.732249 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 28 18:34:39 crc kubenswrapper[4767]: I0128 18:34:39.811636 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-gxsvp_b57f864b-8ab2-499a-a47e-b4a4c62842e7/marketplace-operator/1.log" Jan 28 18:34:39 crc kubenswrapper[4767]: I0128 18:34:39.812068 4767 generic.go:334] "Generic (PLEG): container finished" podID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" 
containerID="8ba8203a908a2daf68edc5dec05811ae867d067dfcda31989ef6475b995f77fc" exitCode=1 Jan 28 18:34:39 crc kubenswrapper[4767]: I0128 18:34:39.812106 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" event={"ID":"b57f864b-8ab2-499a-a47e-b4a4c62842e7","Type":"ContainerDied","Data":"8ba8203a908a2daf68edc5dec05811ae867d067dfcda31989ef6475b995f77fc"} Jan 28 18:34:39 crc kubenswrapper[4767]: I0128 18:34:39.812167 4767 scope.go:117] "RemoveContainer" containerID="533e08262d8663a6db96c61b4deefb0a798033bb5ec7413e4e044b61c4d094f0" Jan 28 18:34:39 crc kubenswrapper[4767]: I0128 18:34:39.812867 4767 scope.go:117] "RemoveContainer" containerID="8ba8203a908a2daf68edc5dec05811ae867d067dfcda31989ef6475b995f77fc" Jan 28 18:34:39 crc kubenswrapper[4767]: E0128 18:34:39.813076 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-gxsvp_openshift-marketplace(b57f864b-8ab2-499a-a47e-b4a4c62842e7)\"" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" Jan 28 18:34:39 crc kubenswrapper[4767]: I0128 18:34:39.953901 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.049844 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.050123 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.133149 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.137437 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.157177 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.273398 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.419282 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.655095 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.818648 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-gxsvp_b57f864b-8ab2-499a-a47e-b4a4c62842e7/marketplace-operator/1.log" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.819133 4767 scope.go:117] "RemoveContainer" containerID="8ba8203a908a2daf68edc5dec05811ae867d067dfcda31989ef6475b995f77fc" Jan 28 18:34:40 crc kubenswrapper[4767]: E0128 18:34:40.819333 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-gxsvp_openshift-marketplace(b57f864b-8ab2-499a-a47e-b4a4c62842e7)\"" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.924878 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.935027 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 28 18:34:40 crc kubenswrapper[4767]: I0128 18:34:40.943760 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 28 18:34:41 crc kubenswrapper[4767]: I0128 18:34:41.121533 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 28 18:34:41 crc kubenswrapper[4767]: I0128 18:34:41.216614 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 28 18:34:41 crc kubenswrapper[4767]: I0128 18:34:41.296084 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 28 18:34:41 crc kubenswrapper[4767]: I0128 18:34:41.331856 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 28 18:34:41 crc kubenswrapper[4767]: I0128 18:34:41.394595 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 28 18:34:41 crc kubenswrapper[4767]: I0128 18:34:41.415434 4767 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 28 18:34:41 crc kubenswrapper[4767]: I0128 18:34:41.465336 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 28 18:34:41 crc kubenswrapper[4767]: I0128 18:34:41.824070 4767 scope.go:117] "RemoveContainer" containerID="8ba8203a908a2daf68edc5dec05811ae867d067dfcda31989ef6475b995f77fc" Jan 28 18:34:41 crc kubenswrapper[4767]: E0128 18:34:41.824314 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-gxsvp_openshift-marketplace(b57f864b-8ab2-499a-a47e-b4a4c62842e7)\"" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" Jan 28 18:34:41 crc kubenswrapper[4767]: I0128 18:34:41.954570 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 28 18:34:42 crc kubenswrapper[4767]: I0128 18:34:42.484618 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 28 18:34:42 crc kubenswrapper[4767]: I0128 18:34:42.525137 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 28 18:34:42 crc kubenswrapper[4767]: I0128 18:34:42.599733 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 28 
18:34:42 crc kubenswrapper[4767]: I0128 18:34:42.801319 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 28 18:34:42 crc kubenswrapper[4767]: I0128 18:34:42.906668 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 28 18:34:43 crc kubenswrapper[4767]: I0128 18:34:43.033514 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 28 18:34:43 crc kubenswrapper[4767]: I0128 18:34:43.117689 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 28 18:34:43 crc kubenswrapper[4767]: I0128 18:34:43.209326 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 28 18:34:43 crc kubenswrapper[4767]: I0128 18:34:43.218003 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 28 18:34:43 crc kubenswrapper[4767]: I0128 18:34:43.314391 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 28 18:34:43 crc kubenswrapper[4767]: I0128 18:34:43.318927 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 28 18:34:43 crc kubenswrapper[4767]: I0128 18:34:43.474879 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 28 18:34:44 crc kubenswrapper[4767]: I0128 18:34:44.042548 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 28 18:34:44 crc kubenswrapper[4767]: I0128 18:34:44.077841 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 28 18:34:44 crc kubenswrapper[4767]: I0128 18:34:44.703768 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 28 18:34:44 crc kubenswrapper[4767]: I0128 18:34:44.789061 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 28 18:34:45 crc kubenswrapper[4767]: I0128 18:34:45.000783 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 28 18:34:45 crc kubenswrapper[4767]: I0128 18:34:45.020870 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 28 18:34:45 crc kubenswrapper[4767]: I0128 18:34:45.091224 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 28 18:34:45 crc kubenswrapper[4767]: I0128 18:34:45.177035 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 28 18:34:45 crc kubenswrapper[4767]: I0128 18:34:45.180654 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 28 18:34:45 crc kubenswrapper[4767]: I0128 18:34:45.246517 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 28 18:34:45 crc kubenswrapper[4767]: I0128 
18:34:45.517148 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 28 18:34:45 crc kubenswrapper[4767]: I0128 18:34:45.775182 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 28 18:34:45 crc kubenswrapper[4767]: I0128 18:34:45.960358 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 28 18:34:46 crc kubenswrapper[4767]: I0128 18:34:46.322001 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 28 18:34:46 crc kubenswrapper[4767]: I0128 18:34:46.334425 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 28 18:34:46 crc kubenswrapper[4767]: I0128 18:34:46.405857 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 28 18:34:46 crc kubenswrapper[4767]: I0128 18:34:46.486724 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 28 18:34:46 crc kubenswrapper[4767]: I0128 18:34:46.589930 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 28 18:34:46 crc kubenswrapper[4767]: I0128 18:34:46.596768 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 28 18:34:46 crc kubenswrapper[4767]: I0128 18:34:46.754373 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 18:34:46 crc kubenswrapper[4767]: I0128 18:34:46.807307 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 28 18:34:47 crc kubenswrapper[4767]: I0128 18:34:47.192580 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 28 18:34:47 crc kubenswrapper[4767]: I0128 18:34:47.243008 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 28 18:34:47 crc kubenswrapper[4767]: I0128 18:34:47.355590 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 28 18:34:47 crc kubenswrapper[4767]: I0128 18:34:47.535503 4767 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 28 18:34:47 crc kubenswrapper[4767]: I0128 18:34:47.757124 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 28 18:34:47 crc kubenswrapper[4767]: I0128 18:34:47.991914 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 28 18:34:48 crc kubenswrapper[4767]: I0128 18:34:48.020327 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 28 18:34:48 crc kubenswrapper[4767]: I0128 18:34:48.498473 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 28 
18:34:48 crc kubenswrapper[4767]: I0128 18:34:48.631146 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 28 18:34:48 crc kubenswrapper[4767]: I0128 18:34:48.760628 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 28 18:34:48 crc kubenswrapper[4767]: I0128 18:34:48.934158 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 18:34:48 crc kubenswrapper[4767]: I0128 18:34:48.950534 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 28 18:34:49 crc kubenswrapper[4767]: I0128 18:34:49.074249 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 28 18:34:49 crc kubenswrapper[4767]: I0128 18:34:49.121200 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 28 18:34:49 crc kubenswrapper[4767]: I0128 18:34:49.123261 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 28 18:34:49 crc kubenswrapper[4767]: I0128 18:34:49.257188 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 28 18:34:49 crc kubenswrapper[4767]: I0128 18:34:49.483511 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 28 18:34:49 crc kubenswrapper[4767]: I0128 18:34:49.777553 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 28 18:34:49 crc kubenswrapper[4767]: I0128 18:34:49.939685 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 28 18:34:49 crc kubenswrapper[4767]: I0128 18:34:49.964080 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 28 18:34:50 crc kubenswrapper[4767]: I0128 18:34:50.061365 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 28 18:34:50 crc kubenswrapper[4767]: I0128 18:34:50.215587 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 28 18:34:50 crc kubenswrapper[4767]: I0128 18:34:50.374784 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 28 18:34:50 crc kubenswrapper[4767]: I0128 18:34:50.469898 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 28 18:34:50 crc kubenswrapper[4767]: I0128 18:34:50.554329 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 28 18:34:50 crc kubenswrapper[4767]: I0128 18:34:50.777843 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 28 18:34:51 crc 
kubenswrapper[4767]: I0128 18:34:51.128988 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 28 18:34:51 crc kubenswrapper[4767]: I0128 18:34:51.420468 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 28 18:34:51 crc kubenswrapper[4767]: I0128 18:34:51.536635 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 28 18:34:51 crc kubenswrapper[4767]: I0128 18:34:51.737343 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 28 18:34:51 crc kubenswrapper[4767]: I0128 18:34:51.817069 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 28 18:34:51 crc kubenswrapper[4767]: I0128 18:34:51.874321 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 28 18:34:51 crc kubenswrapper[4767]: I0128 18:34:51.928360 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 28 18:34:52 crc kubenswrapper[4767]: I0128 18:34:52.230395 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 28 18:34:52 crc kubenswrapper[4767]: I0128 18:34:52.253956 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 28 18:34:52 crc kubenswrapper[4767]: I0128 18:34:52.276255 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 28 18:34:52 crc kubenswrapper[4767]: I0128 18:34:52.324180 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 28 18:34:52 crc kubenswrapper[4767]: I0128 18:34:52.422575 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Jan 28 18:34:52 crc kubenswrapper[4767]: I0128 18:34:52.432794 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 28 18:34:52 crc kubenswrapper[4767]: I0128 18:34:52.457097 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 28 18:34:52 crc kubenswrapper[4767]: I0128 18:34:52.477681 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 28 18:34:52 crc kubenswrapper[4767]: I0128 18:34:52.583816 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 28 18:34:52 crc kubenswrapper[4767]: I0128 18:34:52.714078 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 28 18:34:52 crc kubenswrapper[4767]: I0128 18:34:52.738072 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 28 18:34:52 crc kubenswrapper[4767]: I0128 18:34:52.795424 4767 scope.go:117] "RemoveContainer" containerID="8ba8203a908a2daf68edc5dec05811ae867d067dfcda31989ef6475b995f77fc" Jan 28 18:34:53 crc 
kubenswrapper[4767]: I0128 18:34:53.130828 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 28 18:34:53 crc kubenswrapper[4767]: I0128 18:34:53.202566 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 28 18:34:53 crc kubenswrapper[4767]: I0128 18:34:53.287056 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 28 18:34:53 crc kubenswrapper[4767]: I0128 18:34:53.431137 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 28 18:34:53 crc kubenswrapper[4767]: I0128 18:34:53.499453 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 28 18:34:53 crc kubenswrapper[4767]: I0128 18:34:53.554789 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 28 18:34:53 crc kubenswrapper[4767]: I0128 18:34:53.893096 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-gxsvp_b57f864b-8ab2-499a-a47e-b4a4c62842e7/marketplace-operator/1.log" Jan 28 18:34:53 crc kubenswrapper[4767]: I0128 18:34:53.893149 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" event={"ID":"b57f864b-8ab2-499a-a47e-b4a4c62842e7","Type":"ContainerStarted","Data":"81b671827e3ee70774b7ba248bcd90381b82178dc2057126e88ff50973d47339"} Jan 28 18:34:53 crc kubenswrapper[4767]: I0128 18:34:53.893550 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" Jan 28 18:34:53 crc kubenswrapper[4767]: I0128 18:34:53.900118 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" Jan 28 18:34:54 crc kubenswrapper[4767]: I0128 18:34:54.073067 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 28 18:34:54 crc kubenswrapper[4767]: I0128 18:34:54.258337 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 28 18:34:54 crc kubenswrapper[4767]: I0128 18:34:54.271876 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 18:34:54 crc kubenswrapper[4767]: I0128 18:34:54.592898 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 28 18:34:54 crc kubenswrapper[4767]: I0128 18:34:54.603811 4767 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Jan 28 18:34:54 crc kubenswrapper[4767]: I0128 18:34:54.619192 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 28 18:34:54 crc kubenswrapper[4767]: I0128 18:34:54.793124 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 28 18:34:54 crc kubenswrapper[4767]: I0128 18:34:54.835449 4767 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 28 18:34:55 crc kubenswrapper[4767]: I0128 18:34:55.163863 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 28 18:34:55 crc kubenswrapper[4767]: I0128 18:34:55.262713 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 28 18:34:55 crc kubenswrapper[4767]: I0128 18:34:55.693680 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 28 18:34:55 crc kubenswrapper[4767]: I0128 18:34:55.694148 4767 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 28 18:34:55 crc kubenswrapper[4767]: I0128 18:34:55.833003 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 28 18:34:55 crc kubenswrapper[4767]: I0128 18:34:55.843241 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 28 18:34:56 crc kubenswrapper[4767]: I0128 18:34:56.073046 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 28 18:34:56 crc kubenswrapper[4767]: I0128 18:34:56.226882 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 28 18:34:56 crc kubenswrapper[4767]: I0128 18:34:56.264720 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 28 18:34:56 crc kubenswrapper[4767]: I0128 18:34:56.353954 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 28 18:34:56 crc kubenswrapper[4767]: I0128 18:34:56.588422 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 28 18:34:56 crc kubenswrapper[4767]: I0128 18:34:56.604264 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 28 18:34:56 crc kubenswrapper[4767]: I0128 18:34:56.615536 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 28 18:34:56 crc kubenswrapper[4767]: I0128 18:34:56.885900 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.022748 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.209327 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.251027 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.300331 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.322382 4767 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.402201 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.405522 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.501458 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.530923 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.569376 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.583315 4767 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.739416 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.770710 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.781128 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.819473 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 28 18:34:57 crc kubenswrapper[4767]: I0128 18:34:57.953514 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 28 18:34:58 crc kubenswrapper[4767]: I0128 18:34:58.155351 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 28 18:34:58 crc kubenswrapper[4767]: I0128 18:34:58.201479 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 28 18:34:58 crc kubenswrapper[4767]: I0128 18:34:58.321833 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 28 18:34:58 crc kubenswrapper[4767]: I0128 18:34:58.561905 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 28 18:34:58 crc kubenswrapper[4767]: I0128 18:34:58.733156 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 28 18:34:58 crc kubenswrapper[4767]: I0128 18:34:58.860068 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 28 18:34:58 crc kubenswrapper[4767]: I0128 18:34:58.965915 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 28 18:34:59 crc kubenswrapper[4767]: I0128 
18:34:59.039868 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 28 18:34:59 crc kubenswrapper[4767]: I0128 18:34:59.374650 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 28 18:34:59 crc kubenswrapper[4767]: I0128 18:34:59.681265 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 28 18:34:59 crc kubenswrapper[4767]: I0128 18:34:59.736932 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 28 18:34:59 crc kubenswrapper[4767]: I0128 18:34:59.792479 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 28 18:34:59 crc kubenswrapper[4767]: I0128 18:34:59.794792 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 28 18:34:59 crc kubenswrapper[4767]: I0128 18:34:59.971363 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 28 18:35:00 crc kubenswrapper[4767]: I0128 18:35:00.084685 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 18:35:00 crc kubenswrapper[4767]: I0128 18:35:00.090041 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 18:35:00 crc kubenswrapper[4767]: I0128 18:35:00.226641 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 18:35:00 crc kubenswrapper[4767]: I0128 18:35:00.353645 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 28 18:35:00 crc kubenswrapper[4767]: I0128 18:35:00.361964 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 28 18:35:00 crc kubenswrapper[4767]: I0128 18:35:00.544464 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 28 18:35:01 crc kubenswrapper[4767]: I0128 18:35:01.995427 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 28 18:35:02 crc kubenswrapper[4767]: I0128 18:35:02.234491 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 28 18:35:02 crc kubenswrapper[4767]: I0128 18:35:02.699063 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 28 18:35:02 crc kubenswrapper[4767]: I0128 18:35:02.876739 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 28 18:35:03 crc kubenswrapper[4767]: I0128 18:35:03.312698 4767 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 28 18:35:08 crc kubenswrapper[4767]: I0128 18:35:08.129296 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 28 18:35:08 crc kubenswrapper[4767]: I0128 18:35:08.140091 4767 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 28 18:35:08 crc kubenswrapper[4767]: I0128 18:35:08.222405 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 18:35:09 crc kubenswrapper[4767]: I0128 18:35:09.369009 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 28 18:35:10 crc kubenswrapper[4767]: I0128 18:35:10.222869 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 28 18:35:10 crc kubenswrapper[4767]: I0128 18:35:10.293009 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 28 18:35:11 crc kubenswrapper[4767]: I0128 18:35:11.172750 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 28 18:35:11 crc kubenswrapper[4767]: I0128 18:35:11.245255 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 28 18:35:12 crc kubenswrapper[4767]: I0128 18:35:12.255455 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 28 18:35:12 crc kubenswrapper[4767]: I0128 18:35:12.548573 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 28 18:35:13 crc kubenswrapper[4767]: I0128 18:35:13.134872 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 28 18:35:13 crc kubenswrapper[4767]: I0128 18:35:13.195040 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 28 18:35:13 crc kubenswrapper[4767]: I0128 18:35:13.559502 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 28 18:35:13 crc kubenswrapper[4767]: I0128 18:35:13.682884 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 28 18:35:13 crc kubenswrapper[4767]: I0128 18:35:13.819968 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 28 18:35:15 crc kubenswrapper[4767]: I0128 18:35:15.240285 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 18:35:15 crc kubenswrapper[4767]: I0128 18:35:15.275406 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 18:35:16 crc kubenswrapper[4767]: I0128 18:35:16.903147 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 28 18:35:16 crc kubenswrapper[4767]: I0128 18:35:16.918848 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 28 18:35:17 crc kubenswrapper[4767]: I0128 18:35:17.312980 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 28 18:35:17 crc kubenswrapper[4767]: I0128 18:35:17.946394 4767 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress"/"kube-root-ca.crt" Jan 28 18:35:18 crc kubenswrapper[4767]: I0128 18:35:18.086390 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 28 18:35:19 crc kubenswrapper[4767]: I0128 18:35:19.364174 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 28 18:35:19 crc kubenswrapper[4767]: I0128 18:35:19.505700 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 28 18:35:20 crc kubenswrapper[4767]: I0128 18:35:20.787313 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 28 18:35:20 crc kubenswrapper[4767]: I0128 18:35:20.940084 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Jan 28 18:35:21 crc kubenswrapper[4767]: I0128 18:35:21.120630 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 28 18:35:21 crc kubenswrapper[4767]: I0128 18:35:21.607993 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 28 18:35:22 crc kubenswrapper[4767]: I0128 18:35:22.286456 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 28 18:35:23 crc kubenswrapper[4767]: I0128 18:35:23.259672 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 28 18:35:23 crc kubenswrapper[4767]: I0128 18:35:23.825972 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 28 18:35:24 crc kubenswrapper[4767]: I0128 18:35:24.870512 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 28 18:35:25 crc kubenswrapper[4767]: I0128 18:35:25.025355 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 28 18:35:25 crc kubenswrapper[4767]: I0128 18:35:25.110032 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 28 18:35:25 crc kubenswrapper[4767]: I0128 18:35:25.513142 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.283078 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.511924 4767 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.512339 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-86rk5" podStartSLOduration=88.079494416 podStartE2EDuration="3m47.512319139s" podCreationTimestamp="2026-01-28 18:31:39 +0000 UTC" firstStartedPulling="2026-01-28 18:31:42.06043279 +0000 UTC m=+108.024615664" lastFinishedPulling="2026-01-28 18:34:01.493257473 +0000 UTC m=+247.457440387" observedRunningTime="2026-01-28 18:34:10.571329824 +0000 UTC 
m=+256.535512718" watchObservedRunningTime="2026-01-28 18:35:26.512319139 +0000 UTC m=+332.476502013" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.512669 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-k5t94" podStartSLOduration=83.145833369 podStartE2EDuration="3m43.5126607s" podCreationTimestamp="2026-01-28 18:31:43 +0000 UTC" firstStartedPulling="2026-01-28 18:31:46.307520942 +0000 UTC m=+112.271703816" lastFinishedPulling="2026-01-28 18:34:06.674348273 +0000 UTC m=+252.638531147" observedRunningTime="2026-01-28 18:34:10.651045588 +0000 UTC m=+256.615228472" watchObservedRunningTime="2026-01-28 18:35:26.5126607 +0000 UTC m=+332.476843594" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.513397 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-gft5f" podStartSLOduration=93.114487717 podStartE2EDuration="3m47.513390794s" podCreationTimestamp="2026-01-28 18:31:39 +0000 UTC" firstStartedPulling="2026-01-28 18:31:42.076365269 +0000 UTC m=+108.040548143" lastFinishedPulling="2026-01-28 18:33:56.475268336 +0000 UTC m=+242.439451220" observedRunningTime="2026-01-28 18:34:10.384574287 +0000 UTC m=+256.348757161" watchObservedRunningTime="2026-01-28 18:35:26.513390794 +0000 UTC m=+332.477573668" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.514164 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-bfc9c" podStartSLOduration=87.11624607 podStartE2EDuration="3m45.514159578s" podCreationTimestamp="2026-01-28 18:31:41 +0000 UTC" firstStartedPulling="2026-01-28 18:31:44.267746595 +0000 UTC m=+110.231929459" lastFinishedPulling="2026-01-28 18:34:02.665660053 +0000 UTC m=+248.629842967" observedRunningTime="2026-01-28 18:34:10.40314951 +0000 UTC m=+256.367332384" watchObservedRunningTime="2026-01-28 18:35:26.514159578 +0000 UTC m=+332.478342452" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.514509 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6w58h" podStartSLOduration=85.900510326 podStartE2EDuration="3m46.514505238s" podCreationTimestamp="2026-01-28 18:31:40 +0000 UTC" firstStartedPulling="2026-01-28 18:31:42.053671653 +0000 UTC m=+108.017854527" lastFinishedPulling="2026-01-28 18:34:02.667666525 +0000 UTC m=+248.631849439" observedRunningTime="2026-01-28 18:34:10.593025245 +0000 UTC m=+256.557208119" watchObservedRunningTime="2026-01-28 18:35:26.514505238 +0000 UTC m=+332.478688112" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.516698 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ns2sx" podStartSLOduration=86.117154839 podStartE2EDuration="3m44.516692008s" podCreationTimestamp="2026-01-28 18:31:42 +0000 UTC" firstStartedPulling="2026-01-28 18:31:44.267857318 +0000 UTC m=+110.232040202" lastFinishedPulling="2026-01-28 18:34:02.667394477 +0000 UTC m=+248.631577371" observedRunningTime="2026-01-28 18:34:10.54957345 +0000 UTC m=+256.513756324" watchObservedRunningTime="2026-01-28 18:35:26.516692008 +0000 UTC m=+332.480874882" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.517006 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4lhmz" podStartSLOduration=83.20119341 podStartE2EDuration="3m47.517000867s" 
podCreationTimestamp="2026-01-28 18:31:39 +0000 UTC" firstStartedPulling="2026-01-28 18:31:42.203085453 +0000 UTC m=+108.167268327" lastFinishedPulling="2026-01-28 18:34:06.51889291 +0000 UTC m=+252.483075784" observedRunningTime="2026-01-28 18:34:10.675758254 +0000 UTC m=+256.639941128" watchObservedRunningTime="2026-01-28 18:35:26.517000867 +0000 UTC m=+332.481183741" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.517092 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qcqfz" podStartSLOduration=96.72322238 podStartE2EDuration="3m45.51708795s" podCreationTimestamp="2026-01-28 18:31:41 +0000 UTC" firstStartedPulling="2026-01-28 18:31:43.172090588 +0000 UTC m=+109.136273462" lastFinishedPulling="2026-01-28 18:33:51.965956158 +0000 UTC m=+237.930139032" observedRunningTime="2026-01-28 18:34:10.523892494 +0000 UTC m=+256.488075378" watchObservedRunningTime="2026-01-28 18:35:26.51708795 +0000 UTC m=+332.481270824" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.517794 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.517846 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc","openshift-kube-apiserver/kube-apiserver-crc"] Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.517878 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g","openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb"] Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.518155 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" podUID="49ab6080-e934-47a4-aea4-a437401a6b1d" containerName="route-controller-manager" containerID="cri-o://734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8" gracePeriod=30 Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.521516 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" podUID="de9c3762-ae24-4aca-91c2-b56f539545de" containerName="controller-manager" containerID="cri-o://0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1" gracePeriod=30 Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.527685 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.545497 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=75.545473043 podStartE2EDuration="1m15.545473043s" podCreationTimestamp="2026-01-28 18:34:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:35:26.543062187 +0000 UTC m=+332.507245071" watchObservedRunningTime="2026-01-28 18:35:26.545473043 +0000 UTC m=+332.509655917" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.607298 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=7.6072637279999995 podStartE2EDuration="7.607263728s" podCreationTimestamp="2026-01-28 18:35:19 
+0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:35:26.568056484 +0000 UTC m=+332.532239378" watchObservedRunningTime="2026-01-28 18:35:26.607263728 +0000 UTC m=+332.571446612" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.922416 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.929398 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" Jan 28 18:35:26 crc kubenswrapper[4767]: I0128 18:35:26.976677 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.068429 4767 generic.go:334] "Generic (PLEG): container finished" podID="de9c3762-ae24-4aca-91c2-b56f539545de" containerID="0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1" exitCode=0 Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.068546 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" event={"ID":"de9c3762-ae24-4aca-91c2-b56f539545de","Type":"ContainerDied","Data":"0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1"} Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.068642 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" event={"ID":"de9c3762-ae24-4aca-91c2-b56f539545de","Type":"ContainerDied","Data":"68eb94a05e138b76731d5c38deb39e237dd55546bde4f109ee0a262fd4275e41"} Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.068675 4767 scope.go:117] "RemoveContainer" containerID="0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.068940 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.070320 4767 generic.go:334] "Generic (PLEG): container finished" podID="49ab6080-e934-47a4-aea4-a437401a6b1d" containerID="734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8" exitCode=0 Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.070406 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" event={"ID":"49ab6080-e934-47a4-aea4-a437401a6b1d","Type":"ContainerDied","Data":"734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8"} Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.070552 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb" event={"ID":"49ab6080-e934-47a4-aea4-a437401a6b1d","Type":"ContainerDied","Data":"c10517a1ff580e9413a8fe0c2f732102fa8454a6c64ede0226fcc140bdb1ef7f"} Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.070468 4767 util.go:48] "No ready sandbox for pod can be found. 
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.085035 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/49ab6080-e934-47a4-aea4-a437401a6b1d-serving-cert\") pod \"49ab6080-e934-47a4-aea4-a437401a6b1d\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") "
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.085113 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-client-ca\") pod \"de9c3762-ae24-4aca-91c2-b56f539545de\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") "
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.085176 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4264m\" (UniqueName: \"kubernetes.io/projected/49ab6080-e934-47a4-aea4-a437401a6b1d-kube-api-access-4264m\") pod \"49ab6080-e934-47a4-aea4-a437401a6b1d\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") "
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.085248 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-config\") pod \"de9c3762-ae24-4aca-91c2-b56f539545de\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") "
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.085283 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mdsjp\" (UniqueName: \"kubernetes.io/projected/de9c3762-ae24-4aca-91c2-b56f539545de-kube-api-access-mdsjp\") pod \"de9c3762-ae24-4aca-91c2-b56f539545de\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") "
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.085314 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/49ab6080-e934-47a4-aea4-a437401a6b1d-client-ca\") pod \"49ab6080-e934-47a4-aea4-a437401a6b1d\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") "
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.085338 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-proxy-ca-bundles\") pod \"de9c3762-ae24-4aca-91c2-b56f539545de\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") "
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.085359 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/de9c3762-ae24-4aca-91c2-b56f539545de-serving-cert\") pod \"de9c3762-ae24-4aca-91c2-b56f539545de\" (UID: \"de9c3762-ae24-4aca-91c2-b56f539545de\") "
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.085391 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49ab6080-e934-47a4-aea4-a437401a6b1d-config\") pod \"49ab6080-e934-47a4-aea4-a437401a6b1d\" (UID: \"49ab6080-e934-47a4-aea4-a437401a6b1d\") "
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.087609 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49ab6080-e934-47a4-aea4-a437401a6b1d-config" (OuterVolumeSpecName: "config") pod "49ab6080-e934-47a4-aea4-a437401a6b1d" (UID: "49ab6080-e934-47a4-aea4-a437401a6b1d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.087921 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-client-ca" (OuterVolumeSpecName: "client-ca") pod "de9c3762-ae24-4aca-91c2-b56f539545de" (UID: "de9c3762-ae24-4aca-91c2-b56f539545de"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.088304 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49ab6080-e934-47a4-aea4-a437401a6b1d-client-ca" (OuterVolumeSpecName: "client-ca") pod "49ab6080-e934-47a4-aea4-a437401a6b1d" (UID: "49ab6080-e934-47a4-aea4-a437401a6b1d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.089093 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "de9c3762-ae24-4aca-91c2-b56f539545de" (UID: "de9c3762-ae24-4aca-91c2-b56f539545de"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.089123 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-config" (OuterVolumeSpecName: "config") pod "de9c3762-ae24-4aca-91c2-b56f539545de" (UID: "de9c3762-ae24-4aca-91c2-b56f539545de"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.090034 4767 scope.go:117] "RemoveContainer" containerID="0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1" Jan 28 18:35:27 crc kubenswrapper[4767]: E0128 18:35:27.090631 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1\": container with ID starting with 0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1 not found: ID does not exist" containerID="0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.090691 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1"} err="failed to get container status \"0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1\": rpc error: code = NotFound desc = could not find container \"0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1\": container with ID starting with 0e7d1438d83b42ae018f2a1032c83dda703a0d0f217b9a51e7ec7eda4f45f3f1 not found: ID does not exist" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.090725 4767 scope.go:117] "RemoveContainer" containerID="734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.094597 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de9c3762-ae24-4aca-91c2-b56f539545de-kube-api-access-mdsjp" (OuterVolumeSpecName: "kube-api-access-mdsjp") pod "de9c3762-ae24-4aca-91c2-b56f539545de" (UID: "de9c3762-ae24-4aca-91c2-b56f539545de"). InnerVolumeSpecName "kube-api-access-mdsjp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.094652 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ab6080-e934-47a4-aea4-a437401a6b1d-kube-api-access-4264m" (OuterVolumeSpecName: "kube-api-access-4264m") pod "49ab6080-e934-47a4-aea4-a437401a6b1d" (UID: "49ab6080-e934-47a4-aea4-a437401a6b1d"). InnerVolumeSpecName "kube-api-access-4264m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.094806 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49ab6080-e934-47a4-aea4-a437401a6b1d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "49ab6080-e934-47a4-aea4-a437401a6b1d" (UID: "49ab6080-e934-47a4-aea4-a437401a6b1d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.094806 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/de9c3762-ae24-4aca-91c2-b56f539545de-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "de9c3762-ae24-4aca-91c2-b56f539545de" (UID: "de9c3762-ae24-4aca-91c2-b56f539545de"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.107101 4767 scope.go:117] "RemoveContainer" containerID="734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8" Jan 28 18:35:27 crc kubenswrapper[4767]: E0128 18:35:27.107728 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8\": container with ID starting with 734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8 not found: ID does not exist" containerID="734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.107774 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8"} err="failed to get container status \"734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8\": rpc error: code = NotFound desc = could not find container \"734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8\": container with ID starting with 734b09dba36d7d7347049859e3110ba59fff3a7895cd1e268c9de8665cce3be8 not found: ID does not exist" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.187675 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/49ab6080-e934-47a4-aea4-a437401a6b1d-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.187717 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/49ab6080-e934-47a4-aea4-a437401a6b1d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.187732 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.187745 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4264m\" (UniqueName: \"kubernetes.io/projected/49ab6080-e934-47a4-aea4-a437401a6b1d-kube-api-access-4264m\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.187761 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.187772 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mdsjp\" (UniqueName: \"kubernetes.io/projected/de9c3762-ae24-4aca-91c2-b56f539545de-kube-api-access-mdsjp\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.187782 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/49ab6080-e934-47a4-aea4-a437401a6b1d-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.187792 4767 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/de9c3762-ae24-4aca-91c2-b56f539545de-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.187803 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/de9c3762-ae24-4aca-91c2-b56f539545de-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.231048 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.409494 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g"] Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.412987 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-7d5fb7db4c-w4s8g"] Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.416032 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb"] Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.418905 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7956cc4f9c-rt2zb"] Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.526998 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.552355 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.593593 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.701594 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.992329 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf"] Jan 28 18:35:27 crc kubenswrapper[4767]: E0128 18:35:27.992586 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" containerName="installer" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.992605 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" containerName="installer" Jan 28 18:35:27 crc kubenswrapper[4767]: E0128 18:35:27.992617 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="49ab6080-e934-47a4-aea4-a437401a6b1d" containerName="route-controller-manager" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.992624 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="49ab6080-e934-47a4-aea4-a437401a6b1d" containerName="route-controller-manager" Jan 28 18:35:27 crc kubenswrapper[4767]: E0128 18:35:27.992643 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de9c3762-ae24-4aca-91c2-b56f539545de" containerName="controller-manager" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.992649 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="de9c3762-ae24-4aca-91c2-b56f539545de" containerName="controller-manager" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.992811 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="de9c3762-ae24-4aca-91c2-b56f539545de" containerName="controller-manager" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.992828 4767 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="49ab6080-e934-47a4-aea4-a437401a6b1d" containerName="route-controller-manager" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.992841 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc3fefa4-19b5-4725-9a82-ab64b0c735f5" containerName="installer" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.993293 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.996089 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.996273 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.996580 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.996856 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.998396 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 18:35:27 crc kubenswrapper[4767]: I0128 18:35:27.998439 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.006312 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf"] Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.099855 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1a897999-67ba-4c0e-aada-60d95d9e54b8-client-ca\") pod \"route-controller-manager-7546c7d565-jgdxf\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.100284 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfqxs\" (UniqueName: \"kubernetes.io/projected/1a897999-67ba-4c0e-aada-60d95d9e54b8-kube-api-access-kfqxs\") pod \"route-controller-manager-7546c7d565-jgdxf\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.100341 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a897999-67ba-4c0e-aada-60d95d9e54b8-config\") pod \"route-controller-manager-7546c7d565-jgdxf\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.100436 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a897999-67ba-4c0e-aada-60d95d9e54b8-serving-cert\") pod 
\"route-controller-manager-7546c7d565-jgdxf\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.201273 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a897999-67ba-4c0e-aada-60d95d9e54b8-config\") pod \"route-controller-manager-7546c7d565-jgdxf\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.201322 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a897999-67ba-4c0e-aada-60d95d9e54b8-serving-cert\") pod \"route-controller-manager-7546c7d565-jgdxf\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.201374 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1a897999-67ba-4c0e-aada-60d95d9e54b8-client-ca\") pod \"route-controller-manager-7546c7d565-jgdxf\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.201403 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfqxs\" (UniqueName: \"kubernetes.io/projected/1a897999-67ba-4c0e-aada-60d95d9e54b8-kube-api-access-kfqxs\") pod \"route-controller-manager-7546c7d565-jgdxf\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.203414 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1a897999-67ba-4c0e-aada-60d95d9e54b8-client-ca\") pod \"route-controller-manager-7546c7d565-jgdxf\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.203441 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a897999-67ba-4c0e-aada-60d95d9e54b8-config\") pod \"route-controller-manager-7546c7d565-jgdxf\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.207177 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a897999-67ba-4c0e-aada-60d95d9e54b8-serving-cert\") pod \"route-controller-manager-7546c7d565-jgdxf\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.225535 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfqxs\" (UniqueName: \"kubernetes.io/projected/1a897999-67ba-4c0e-aada-60d95d9e54b8-kube-api-access-kfqxs\") pod \"route-controller-manager-7546c7d565-jgdxf\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " 
pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.309969 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.519783 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf"] Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.803479 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ab6080-e934-47a4-aea4-a437401a6b1d" path="/var/lib/kubelet/pods/49ab6080-e934-47a4-aea4-a437401a6b1d/volumes" Jan 28 18:35:28 crc kubenswrapper[4767]: I0128 18:35:28.804464 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de9c3762-ae24-4aca-91c2-b56f539545de" path="/var/lib/kubelet/pods/de9c3762-ae24-4aca-91c2-b56f539545de/volumes" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.034521 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.092592 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" event={"ID":"1a897999-67ba-4c0e-aada-60d95d9e54b8","Type":"ContainerStarted","Data":"d297c1f9b895c79f3aef36347426c8039ddb65213e5611963243e02948e209c3"} Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.092659 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" event={"ID":"1a897999-67ba-4c0e-aada-60d95d9e54b8","Type":"ContainerStarted","Data":"235ee4bbced39ae9a9d4551b9429e31cf69a22c6ee4cbaf55fbc679d9d3f071e"} Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.093007 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.097708 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.103329 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.113867 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" podStartSLOduration=6.113844634 podStartE2EDuration="6.113844634s" podCreationTimestamp="2026-01-28 18:35:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:35:29.112024677 +0000 UTC m=+335.076207561" watchObservedRunningTime="2026-01-28 18:35:29.113844634 +0000 UTC m=+335.078027518" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.328031 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.414005 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 28 18:35:29 crc 
kubenswrapper[4767]: I0128 18:35:29.425697 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.478709 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-d8c796598-6klcl"] Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.480146 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.484388 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.486595 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-d8c796598-6klcl"] Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.486639 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.487041 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.487159 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.487402 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.487705 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.490736 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.620347 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-serving-cert\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.620413 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-config\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.620458 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cb8wd\" (UniqueName: \"kubernetes.io/projected/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-kube-api-access-cb8wd\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.620507 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-proxy-ca-bundles\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.620578 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-client-ca\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.721531 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-serving-cert\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.721586 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-config\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.721618 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cb8wd\" (UniqueName: \"kubernetes.io/projected/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-kube-api-access-cb8wd\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.721641 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-proxy-ca-bundles\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.721682 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-client-ca\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.723014 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-client-ca\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.723068 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-proxy-ca-bundles\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " 
pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.723765 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-config\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.730047 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-serving-cert\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.749608 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cb8wd\" (UniqueName: \"kubernetes.io/projected/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-kube-api-access-cb8wd\") pod \"controller-manager-d8c796598-6klcl\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.765087 4767 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.765501 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://b576c8e60ec56d24f4fa773e84c82ca50182158dea49adb0de3372c3d2f9d721" gracePeriod=5 Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.798947 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.838127 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:29 crc kubenswrapper[4767]: I0128 18:35:29.923497 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 28 18:35:30 crc kubenswrapper[4767]: I0128 18:35:30.056755 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-d8c796598-6klcl"] Jan 28 18:35:30 crc kubenswrapper[4767]: W0128 18:35:30.074493 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod962c2b7d_7d34_4e1b_8e6a_6e798e243a27.slice/crio-077b3db1f13aca7cdad2dc085d08503dbf33927633e27273d53259b82d558c83 WatchSource:0}: Error finding container 077b3db1f13aca7cdad2dc085d08503dbf33927633e27273d53259b82d558c83: Status 404 returned error can't find the container with id 077b3db1f13aca7cdad2dc085d08503dbf33927633e27273d53259b82d558c83 Jan 28 18:35:30 crc kubenswrapper[4767]: I0128 18:35:30.103702 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" event={"ID":"962c2b7d-7d34-4e1b-8e6a-6e798e243a27","Type":"ContainerStarted","Data":"077b3db1f13aca7cdad2dc085d08503dbf33927633e27273d53259b82d558c83"} Jan 28 18:35:31 crc kubenswrapper[4767]: I0128 18:35:31.074240 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 28 18:35:31 crc kubenswrapper[4767]: I0128 18:35:31.110147 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" event={"ID":"962c2b7d-7d34-4e1b-8e6a-6e798e243a27","Type":"ContainerStarted","Data":"942ce27bbd3280d93225ddf91c976f3ad8675312db96cb78cd03907ba574b336"} Jan 28 18:35:31 crc kubenswrapper[4767]: I0128 18:35:31.913350 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 28 18:35:31 crc kubenswrapper[4767]: I0128 18:35:31.932869 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 28 18:35:32 crc kubenswrapper[4767]: I0128 18:35:32.117393 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:32 crc kubenswrapper[4767]: I0128 18:35:32.123542 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:32 crc kubenswrapper[4767]: I0128 18:35:32.146791 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" podStartSLOduration=9.146769316 podStartE2EDuration="9.146769316s" podCreationTimestamp="2026-01-28 18:35:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:35:31.125896753 +0000 UTC m=+337.090079627" watchObservedRunningTime="2026-01-28 18:35:32.146769316 +0000 UTC m=+338.110952190" Jan 28 18:35:32 crc kubenswrapper[4767]: I0128 18:35:32.328885 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 28 18:35:32 crc kubenswrapper[4767]: I0128 18:35:32.664140 4767 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 28 18:35:34 crc kubenswrapper[4767]: I0128 18:35:34.121855 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 28 18:35:34 crc kubenswrapper[4767]: I0128 18:35:34.466332 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 28 18:35:34 crc kubenswrapper[4767]: I0128 18:35:34.862529 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.136955 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.137452 4767 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="b576c8e60ec56d24f4fa773e84c82ca50182158dea49adb0de3372c3d2f9d721" exitCode=137 Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.339568 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.339684 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.410876 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.519772 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.519934 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.519980 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.519964 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.520019 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.520029 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.520068 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.520098 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.520254 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.520779 4767 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.520802 4767 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.520814 4767 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.520825 4767 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.534068 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:35:35 crc kubenswrapper[4767]: I0128 18:35:35.622465 4767 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:36 crc kubenswrapper[4767]: I0128 18:35:36.145456 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Jan 28 18:35:36 crc kubenswrapper[4767]: I0128 18:35:36.145536 4767 scope.go:117] "RemoveContainer" containerID="b576c8e60ec56d24f4fa773e84c82ca50182158dea49adb0de3372c3d2f9d721" Jan 28 18:35:36 crc kubenswrapper[4767]: I0128 18:35:36.145652 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 28 18:35:36 crc kubenswrapper[4767]: I0128 18:35:36.802985 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Jan 28 18:35:36 crc kubenswrapper[4767]: I0128 18:35:36.803326 4767 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Jan 28 18:35:36 crc kubenswrapper[4767]: I0128 18:35:36.815540 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 18:35:36 crc kubenswrapper[4767]: I0128 18:35:36.815595 4767 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="04d4f526-2f11-45c2-b2cc-1af9c5cae572" Jan 28 18:35:36 crc kubenswrapper[4767]: I0128 18:35:36.821346 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 28 18:35:36 crc kubenswrapper[4767]: I0128 18:35:36.821418 4767 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="04d4f526-2f11-45c2-b2cc-1af9c5cae572" Jan 28 18:35:37 crc kubenswrapper[4767]: I0128 18:35:37.782278 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-d8c796598-6klcl"] Jan 28 18:35:37 crc kubenswrapper[4767]: I0128 18:35:37.782810 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" podUID="962c2b7d-7d34-4e1b-8e6a-6e798e243a27" containerName="controller-manager" containerID="cri-o://942ce27bbd3280d93225ddf91c976f3ad8675312db96cb78cd03907ba574b336" gracePeriod=30 Jan 28 18:35:37 crc kubenswrapper[4767]: I0128 18:35:37.880583 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf"] Jan 28 18:35:37 crc kubenswrapper[4767]: I0128 18:35:37.880900 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" podUID="1a897999-67ba-4c0e-aada-60d95d9e54b8" containerName="route-controller-manager" containerID="cri-o://d297c1f9b895c79f3aef36347426c8039ddb65213e5611963243e02948e209c3" gracePeriod=30 Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.163963 4767 generic.go:334] "Generic 
(PLEG): container finished" podID="1a897999-67ba-4c0e-aada-60d95d9e54b8" containerID="d297c1f9b895c79f3aef36347426c8039ddb65213e5611963243e02948e209c3" exitCode=0 Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.164028 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" event={"ID":"1a897999-67ba-4c0e-aada-60d95d9e54b8","Type":"ContainerDied","Data":"d297c1f9b895c79f3aef36347426c8039ddb65213e5611963243e02948e209c3"} Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.170049 4767 generic.go:334] "Generic (PLEG): container finished" podID="962c2b7d-7d34-4e1b-8e6a-6e798e243a27" containerID="942ce27bbd3280d93225ddf91c976f3ad8675312db96cb78cd03907ba574b336" exitCode=0 Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.170089 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" event={"ID":"962c2b7d-7d34-4e1b-8e6a-6e798e243a27","Type":"ContainerDied","Data":"942ce27bbd3280d93225ddf91c976f3ad8675312db96cb78cd03907ba574b336"} Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.358761 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.360122 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1a897999-67ba-4c0e-aada-60d95d9e54b8-client-ca\") pod \"1a897999-67ba-4c0e-aada-60d95d9e54b8\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.360148 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a897999-67ba-4c0e-aada-60d95d9e54b8-config\") pod \"1a897999-67ba-4c0e-aada-60d95d9e54b8\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.360175 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfqxs\" (UniqueName: \"kubernetes.io/projected/1a897999-67ba-4c0e-aada-60d95d9e54b8-kube-api-access-kfqxs\") pod \"1a897999-67ba-4c0e-aada-60d95d9e54b8\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.360214 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a897999-67ba-4c0e-aada-60d95d9e54b8-serving-cert\") pod \"1a897999-67ba-4c0e-aada-60d95d9e54b8\" (UID: \"1a897999-67ba-4c0e-aada-60d95d9e54b8\") " Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.361164 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a897999-67ba-4c0e-aada-60d95d9e54b8-client-ca" (OuterVolumeSpecName: "client-ca") pod "1a897999-67ba-4c0e-aada-60d95d9e54b8" (UID: "1a897999-67ba-4c0e-aada-60d95d9e54b8"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.361185 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a897999-67ba-4c0e-aada-60d95d9e54b8-config" (OuterVolumeSpecName: "config") pod "1a897999-67ba-4c0e-aada-60d95d9e54b8" (UID: "1a897999-67ba-4c0e-aada-60d95d9e54b8"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.371576 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a897999-67ba-4c0e-aada-60d95d9e54b8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1a897999-67ba-4c0e-aada-60d95d9e54b8" (UID: "1a897999-67ba-4c0e-aada-60d95d9e54b8"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.374072 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a897999-67ba-4c0e-aada-60d95d9e54b8-kube-api-access-kfqxs" (OuterVolumeSpecName: "kube-api-access-kfqxs") pod "1a897999-67ba-4c0e-aada-60d95d9e54b8" (UID: "1a897999-67ba-4c0e-aada-60d95d9e54b8"). InnerVolumeSpecName "kube-api-access-kfqxs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.408965 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.460827 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-serving-cert\") pod \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.460872 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-client-ca\") pod \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.460910 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-proxy-ca-bundles\") pod \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.460960 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cb8wd\" (UniqueName: \"kubernetes.io/projected/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-kube-api-access-cb8wd\") pod \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.460975 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-config\") pod \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\" (UID: \"962c2b7d-7d34-4e1b-8e6a-6e798e243a27\") " Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.461099 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a897999-67ba-4c0e-aada-60d95d9e54b8-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.461110 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfqxs\" (UniqueName: \"kubernetes.io/projected/1a897999-67ba-4c0e-aada-60d95d9e54b8-kube-api-access-kfqxs\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.461120 4767 reconciler_common.go:293] "Volume 
detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a897999-67ba-4c0e-aada-60d95d9e54b8-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.461127 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1a897999-67ba-4c0e-aada-60d95d9e54b8-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.461999 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "962c2b7d-7d34-4e1b-8e6a-6e798e243a27" (UID: "962c2b7d-7d34-4e1b-8e6a-6e798e243a27"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.462024 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-client-ca" (OuterVolumeSpecName: "client-ca") pod "962c2b7d-7d34-4e1b-8e6a-6e798e243a27" (UID: "962c2b7d-7d34-4e1b-8e6a-6e798e243a27"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.462283 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-config" (OuterVolumeSpecName: "config") pod "962c2b7d-7d34-4e1b-8e6a-6e798e243a27" (UID: "962c2b7d-7d34-4e1b-8e6a-6e798e243a27"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.464190 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-kube-api-access-cb8wd" (OuterVolumeSpecName: "kube-api-access-cb8wd") pod "962c2b7d-7d34-4e1b-8e6a-6e798e243a27" (UID: "962c2b7d-7d34-4e1b-8e6a-6e798e243a27"). InnerVolumeSpecName "kube-api-access-cb8wd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.464280 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "962c2b7d-7d34-4e1b-8e6a-6e798e243a27" (UID: "962c2b7d-7d34-4e1b-8e6a-6e798e243a27"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.561952 4767 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.561994 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cb8wd\" (UniqueName: \"kubernetes.io/projected/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-kube-api-access-cb8wd\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.562013 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.562024 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.562036 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/962c2b7d-7d34-4e1b-8e6a-6e798e243a27-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:38 crc kubenswrapper[4767]: I0128 18:35:38.918363 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.177564 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" event={"ID":"962c2b7d-7d34-4e1b-8e6a-6e798e243a27","Type":"ContainerDied","Data":"077b3db1f13aca7cdad2dc085d08503dbf33927633e27273d53259b82d558c83"} Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.177609 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-d8c796598-6klcl" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.177663 4767 scope.go:117] "RemoveContainer" containerID="942ce27bbd3280d93225ddf91c976f3ad8675312db96cb78cd03907ba574b336" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.182370 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" event={"ID":"1a897999-67ba-4c0e-aada-60d95d9e54b8","Type":"ContainerDied","Data":"235ee4bbced39ae9a9d4551b9429e31cf69a22c6ee4cbaf55fbc679d9d3f071e"} Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.182489 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.196332 4767 scope.go:117] "RemoveContainer" containerID="d297c1f9b895c79f3aef36347426c8039ddb65213e5611963243e02948e209c3" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.200721 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-d8c796598-6klcl"] Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.206513 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-d8c796598-6klcl"] Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.217016 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf"] Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.221900 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf"] Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.312221 4767 patch_prober.go:28] interesting pod/route-controller-manager-7546c7d565-jgdxf container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.312350 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7546c7d565-jgdxf" podUID="1a897999-67ba-4c0e-aada-60d95d9e54b8" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.480124 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-f49b86d76-bv6fd"] Jan 28 18:35:39 crc kubenswrapper[4767]: E0128 18:35:39.480837 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="962c2b7d-7d34-4e1b-8e6a-6e798e243a27" containerName="controller-manager" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.480928 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="962c2b7d-7d34-4e1b-8e6a-6e798e243a27" containerName="controller-manager" Jan 28 18:35:39 crc kubenswrapper[4767]: E0128 18:35:39.481071 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a897999-67ba-4c0e-aada-60d95d9e54b8" containerName="route-controller-manager" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.481143 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a897999-67ba-4c0e-aada-60d95d9e54b8" containerName="route-controller-manager" Jan 28 18:35:39 crc kubenswrapper[4767]: E0128 18:35:39.481243 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.481316 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.481646 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="962c2b7d-7d34-4e1b-8e6a-6e798e243a27" 
containerName="controller-manager" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.481761 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a897999-67ba-4c0e-aada-60d95d9e54b8" containerName="route-controller-manager" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.481864 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.485476 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n"] Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.486660 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.491345 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-f49b86d76-bv6fd"] Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.492621 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.492710 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.492728 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.492933 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.493063 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.493378 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.496720 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.496845 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.497502 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.497689 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.497807 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.497931 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.498028 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n"] Jan 28 18:35:39 
crc kubenswrapper[4767]: I0128 18:35:39.512363 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.512788 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.575900 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0536729-b321-42d5-9620-e8fc6f2617cd-serving-cert\") pod \"route-controller-manager-645cc58796-7w84n\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.575977 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0536729-b321-42d5-9620-e8fc6f2617cd-config\") pod \"route-controller-manager-645cc58796-7w84n\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.576142 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da8b07a3-3a51-432a-a78e-b8e598a59a15-serving-cert\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.576304 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-client-ca\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.576338 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k58zl\" (UniqueName: \"kubernetes.io/projected/b0536729-b321-42d5-9620-e8fc6f2617cd-kube-api-access-k58zl\") pod \"route-controller-manager-645cc58796-7w84n\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.576392 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-proxy-ca-bundles\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.576551 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b0536729-b321-42d5-9620-e8fc6f2617cd-client-ca\") pod \"route-controller-manager-645cc58796-7w84n\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.576603 4767 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fv6c6\" (UniqueName: \"kubernetes.io/projected/da8b07a3-3a51-432a-a78e-b8e598a59a15-kube-api-access-fv6c6\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.576691 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-config\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.677844 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0536729-b321-42d5-9620-e8fc6f2617cd-serving-cert\") pod \"route-controller-manager-645cc58796-7w84n\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.677905 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0536729-b321-42d5-9620-e8fc6f2617cd-config\") pod \"route-controller-manager-645cc58796-7w84n\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.677932 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da8b07a3-3a51-432a-a78e-b8e598a59a15-serving-cert\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.677955 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-client-ca\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.677982 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k58zl\" (UniqueName: \"kubernetes.io/projected/b0536729-b321-42d5-9620-e8fc6f2617cd-kube-api-access-k58zl\") pod \"route-controller-manager-645cc58796-7w84n\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.678007 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-proxy-ca-bundles\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.678043 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/b0536729-b321-42d5-9620-e8fc6f2617cd-client-ca\") pod \"route-controller-manager-645cc58796-7w84n\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.678077 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fv6c6\" (UniqueName: \"kubernetes.io/projected/da8b07a3-3a51-432a-a78e-b8e598a59a15-kube-api-access-fv6c6\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.678103 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-config\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.680257 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-config\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.680535 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-proxy-ca-bundles\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.680888 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-client-ca\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.681051 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b0536729-b321-42d5-9620-e8fc6f2617cd-client-ca\") pod \"route-controller-manager-645cc58796-7w84n\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.681522 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0536729-b321-42d5-9620-e8fc6f2617cd-config\") pod \"route-controller-manager-645cc58796-7w84n\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.687880 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0536729-b321-42d5-9620-e8fc6f2617cd-serving-cert\") pod \"route-controller-manager-645cc58796-7w84n\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " 
pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.691244 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da8b07a3-3a51-432a-a78e-b8e598a59a15-serving-cert\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.697241 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k58zl\" (UniqueName: \"kubernetes.io/projected/b0536729-b321-42d5-9620-e8fc6f2617cd-kube-api-access-k58zl\") pod \"route-controller-manager-645cc58796-7w84n\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.700448 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fv6c6\" (UniqueName: \"kubernetes.io/projected/da8b07a3-3a51-432a-a78e-b8e598a59a15-kube-api-access-fv6c6\") pod \"controller-manager-f49b86d76-bv6fd\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.814968 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:39 crc kubenswrapper[4767]: I0128 18:35:39.831629 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:40 crc kubenswrapper[4767]: I0128 18:35:40.030745 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n"] Jan 28 18:35:40 crc kubenswrapper[4767]: I0128 18:35:40.200741 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" event={"ID":"b0536729-b321-42d5-9620-e8fc6f2617cd","Type":"ContainerStarted","Data":"41e42329d8ac2e34478474074c697d2ea213dd3f6564ceda305c6b67bea69200"} Jan 28 18:35:40 crc kubenswrapper[4767]: I0128 18:35:40.282326 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-f49b86d76-bv6fd"] Jan 28 18:35:40 crc kubenswrapper[4767]: W0128 18:35:40.289334 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podda8b07a3_3a51_432a_a78e_b8e598a59a15.slice/crio-111aebbdfd6706f46f4efb0c698f0029f31b02c33a666f2f2e8a05251bba542c WatchSource:0}: Error finding container 111aebbdfd6706f46f4efb0c698f0029f31b02c33a666f2f2e8a05251bba542c: Status 404 returned error can't find the container with id 111aebbdfd6706f46f4efb0c698f0029f31b02c33a666f2f2e8a05251bba542c Jan 28 18:35:40 crc kubenswrapper[4767]: I0128 18:35:40.360980 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 28 18:35:40 crc kubenswrapper[4767]: I0128 18:35:40.802076 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a897999-67ba-4c0e-aada-60d95d9e54b8" path="/var/lib/kubelet/pods/1a897999-67ba-4c0e-aada-60d95d9e54b8/volumes" Jan 28 18:35:40 crc kubenswrapper[4767]: I0128 
18:35:40.802839 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="962c2b7d-7d34-4e1b-8e6a-6e798e243a27" path="/var/lib/kubelet/pods/962c2b7d-7d34-4e1b-8e6a-6e798e243a27/volumes" Jan 28 18:35:41 crc kubenswrapper[4767]: I0128 18:35:41.164022 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 28 18:35:41 crc kubenswrapper[4767]: I0128 18:35:41.209981 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" event={"ID":"b0536729-b321-42d5-9620-e8fc6f2617cd","Type":"ContainerStarted","Data":"eb756c74e0f6151d7175b5ab031e19bd11ae890304b54915dac5fc3be98d4ecf"} Jan 28 18:35:41 crc kubenswrapper[4767]: I0128 18:35:41.210457 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:41 crc kubenswrapper[4767]: I0128 18:35:41.212090 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" event={"ID":"da8b07a3-3a51-432a-a78e-b8e598a59a15","Type":"ContainerStarted","Data":"d477fe8d5a4e366e58dd52cb93bdc73e48c2e4a64cecdc654e8d56025f1b60e2"} Jan 28 18:35:41 crc kubenswrapper[4767]: I0128 18:35:41.212130 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" event={"ID":"da8b07a3-3a51-432a-a78e-b8e598a59a15","Type":"ContainerStarted","Data":"111aebbdfd6706f46f4efb0c698f0029f31b02c33a666f2f2e8a05251bba542c"} Jan 28 18:35:41 crc kubenswrapper[4767]: I0128 18:35:41.212330 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:41 crc kubenswrapper[4767]: I0128 18:35:41.215587 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:41 crc kubenswrapper[4767]: I0128 18:35:41.216904 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:41 crc kubenswrapper[4767]: I0128 18:35:41.236224 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" podStartSLOduration=4.236178239 podStartE2EDuration="4.236178239s" podCreationTimestamp="2026-01-28 18:35:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:35:41.232254375 +0000 UTC m=+347.196437269" watchObservedRunningTime="2026-01-28 18:35:41.236178239 +0000 UTC m=+347.200361123" Jan 28 18:35:41 crc kubenswrapper[4767]: I0128 18:35:41.252819 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" podStartSLOduration=4.252795472 podStartE2EDuration="4.252795472s" podCreationTimestamp="2026-01-28 18:35:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:35:41.250782199 +0000 UTC m=+347.214965063" watchObservedRunningTime="2026-01-28 18:35:41.252795472 +0000 UTC m=+347.216978346" Jan 28 18:35:41 crc kubenswrapper[4767]: I0128 18:35:41.712752 
4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 28 18:35:42 crc kubenswrapper[4767]: I0128 18:35:42.448353 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 28 18:35:42 crc kubenswrapper[4767]: I0128 18:35:42.859731 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 28 18:35:43 crc kubenswrapper[4767]: I0128 18:35:43.402642 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 28 18:35:46 crc kubenswrapper[4767]: I0128 18:35:46.529623 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 28 18:35:53 crc kubenswrapper[4767]: I0128 18:35:53.068918 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 28 18:35:57 crc kubenswrapper[4767]: I0128 18:35:57.766284 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-f49b86d76-bv6fd"] Jan 28 18:35:57 crc kubenswrapper[4767]: I0128 18:35:57.767237 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" podUID="da8b07a3-3a51-432a-a78e-b8e598a59a15" containerName="controller-manager" containerID="cri-o://d477fe8d5a4e366e58dd52cb93bdc73e48c2e4a64cecdc654e8d56025f1b60e2" gracePeriod=30 Jan 28 18:35:57 crc kubenswrapper[4767]: I0128 18:35:57.781656 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n"] Jan 28 18:35:57 crc kubenswrapper[4767]: I0128 18:35:57.782098 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" podUID="b0536729-b321-42d5-9620-e8fc6f2617cd" containerName="route-controller-manager" containerID="cri-o://eb756c74e0f6151d7175b5ab031e19bd11ae890304b54915dac5fc3be98d4ecf" gracePeriod=30 Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.327689 4767 generic.go:334] "Generic (PLEG): container finished" podID="b0536729-b321-42d5-9620-e8fc6f2617cd" containerID="eb756c74e0f6151d7175b5ab031e19bd11ae890304b54915dac5fc3be98d4ecf" exitCode=0 Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.327771 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" event={"ID":"b0536729-b321-42d5-9620-e8fc6f2617cd","Type":"ContainerDied","Data":"eb756c74e0f6151d7175b5ab031e19bd11ae890304b54915dac5fc3be98d4ecf"} Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.327801 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" event={"ID":"b0536729-b321-42d5-9620-e8fc6f2617cd","Type":"ContainerDied","Data":"41e42329d8ac2e34478474074c697d2ea213dd3f6564ceda305c6b67bea69200"} Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.327814 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="41e42329d8ac2e34478474074c697d2ea213dd3f6564ceda305c6b67bea69200" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.330095 4767 generic.go:334] "Generic (PLEG): container finished" 
podID="da8b07a3-3a51-432a-a78e-b8e598a59a15" containerID="d477fe8d5a4e366e58dd52cb93bdc73e48c2e4a64cecdc654e8d56025f1b60e2" exitCode=0 Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.330157 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" event={"ID":"da8b07a3-3a51-432a-a78e-b8e598a59a15","Type":"ContainerDied","Data":"d477fe8d5a4e366e58dd52cb93bdc73e48c2e4a64cecdc654e8d56025f1b60e2"} Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.334028 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-86rk5"] Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.334406 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-86rk5" podUID="c91bd017-c929-4891-9118-95e20ef61238" containerName="registry-server" containerID="cri-o://87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8" gracePeriod=2 Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.358018 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.525451 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.531985 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6w58h"] Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.535008 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6w58h" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" containerName="registry-server" containerID="cri-o://b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175" gracePeriod=2 Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.550448 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0536729-b321-42d5-9620-e8fc6f2617cd-config\") pod \"b0536729-b321-42d5-9620-e8fc6f2617cd\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.550640 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b0536729-b321-42d5-9620-e8fc6f2617cd-serving-cert\") pod \"b0536729-b321-42d5-9620-e8fc6f2617cd\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.550672 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b0536729-b321-42d5-9620-e8fc6f2617cd-client-ca\") pod \"b0536729-b321-42d5-9620-e8fc6f2617cd\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.550852 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k58zl\" (UniqueName: \"kubernetes.io/projected/b0536729-b321-42d5-9620-e8fc6f2617cd-kube-api-access-k58zl\") pod \"b0536729-b321-42d5-9620-e8fc6f2617cd\" (UID: \"b0536729-b321-42d5-9620-e8fc6f2617cd\") " Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.552484 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/b0536729-b321-42d5-9620-e8fc6f2617cd-client-ca" (OuterVolumeSpecName: "client-ca") pod "b0536729-b321-42d5-9620-e8fc6f2617cd" (UID: "b0536729-b321-42d5-9620-e8fc6f2617cd"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.553028 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b0536729-b321-42d5-9620-e8fc6f2617cd-config" (OuterVolumeSpecName: "config") pod "b0536729-b321-42d5-9620-e8fc6f2617cd" (UID: "b0536729-b321-42d5-9620-e8fc6f2617cd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.565429 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0536729-b321-42d5-9620-e8fc6f2617cd-kube-api-access-k58zl" (OuterVolumeSpecName: "kube-api-access-k58zl") pod "b0536729-b321-42d5-9620-e8fc6f2617cd" (UID: "b0536729-b321-42d5-9620-e8fc6f2617cd"). InnerVolumeSpecName "kube-api-access-k58zl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.566515 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b0536729-b321-42d5-9620-e8fc6f2617cd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b0536729-b321-42d5-9620-e8fc6f2617cd" (UID: "b0536729-b321-42d5-9620-e8fc6f2617cd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.652057 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-proxy-ca-bundles\") pod \"da8b07a3-3a51-432a-a78e-b8e598a59a15\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.652161 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-config\") pod \"da8b07a3-3a51-432a-a78e-b8e598a59a15\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.652393 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fv6c6\" (UniqueName: \"kubernetes.io/projected/da8b07a3-3a51-432a-a78e-b8e598a59a15-kube-api-access-fv6c6\") pod \"da8b07a3-3a51-432a-a78e-b8e598a59a15\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.652437 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da8b07a3-3a51-432a-a78e-b8e598a59a15-serving-cert\") pod \"da8b07a3-3a51-432a-a78e-b8e598a59a15\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.652472 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-client-ca\") pod \"da8b07a3-3a51-432a-a78e-b8e598a59a15\" (UID: \"da8b07a3-3a51-432a-a78e-b8e598a59a15\") " Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.652852 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/b0536729-b321-42d5-9620-e8fc6f2617cd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.652875 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b0536729-b321-42d5-9620-e8fc6f2617cd-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.652893 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k58zl\" (UniqueName: \"kubernetes.io/projected/b0536729-b321-42d5-9620-e8fc6f2617cd-kube-api-access-k58zl\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.652908 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b0536729-b321-42d5-9620-e8fc6f2617cd-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.653306 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "da8b07a3-3a51-432a-a78e-b8e598a59a15" (UID: "da8b07a3-3a51-432a-a78e-b8e598a59a15"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.653342 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-client-ca" (OuterVolumeSpecName: "client-ca") pod "da8b07a3-3a51-432a-a78e-b8e598a59a15" (UID: "da8b07a3-3a51-432a-a78e-b8e598a59a15"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.654736 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-config" (OuterVolumeSpecName: "config") pod "da8b07a3-3a51-432a-a78e-b8e598a59a15" (UID: "da8b07a3-3a51-432a-a78e-b8e598a59a15"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.657665 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da8b07a3-3a51-432a-a78e-b8e598a59a15-kube-api-access-fv6c6" (OuterVolumeSpecName: "kube-api-access-fv6c6") pod "da8b07a3-3a51-432a-a78e-b8e598a59a15" (UID: "da8b07a3-3a51-432a-a78e-b8e598a59a15"). InnerVolumeSpecName "kube-api-access-fv6c6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.658679 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da8b07a3-3a51-432a-a78e-b8e598a59a15-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "da8b07a3-3a51-432a-a78e-b8e598a59a15" (UID: "da8b07a3-3a51-432a-a78e-b8e598a59a15"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:35:58 crc kubenswrapper[4767]: E0128 18:35:58.697487 4767 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0ee47b3_df01_4b36_8d19_ead3db6a705d.slice/crio-conmon-b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175.scope\": RecentStats: unable to find data in memory cache]" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.746552 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.753598 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fv6c6\" (UniqueName: \"kubernetes.io/projected/da8b07a3-3a51-432a-a78e-b8e598a59a15-kube-api-access-fv6c6\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.753717 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/da8b07a3-3a51-432a-a78e-b8e598a59a15-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.753784 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.753867 4767 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.753955 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/da8b07a3-3a51-432a-a78e-b8e598a59a15-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.855546 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c91bd017-c929-4891-9118-95e20ef61238-catalog-content\") pod \"c91bd017-c929-4891-9118-95e20ef61238\" (UID: \"c91bd017-c929-4891-9118-95e20ef61238\") " Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.856657 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c91bd017-c929-4891-9118-95e20ef61238-utilities\") pod \"c91bd017-c929-4891-9118-95e20ef61238\" (UID: \"c91bd017-c929-4891-9118-95e20ef61238\") " Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.856795 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjgbt\" (UniqueName: \"kubernetes.io/projected/c91bd017-c929-4891-9118-95e20ef61238-kube-api-access-bjgbt\") pod \"c91bd017-c929-4891-9118-95e20ef61238\" (UID: \"c91bd017-c929-4891-9118-95e20ef61238\") " Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.857720 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c91bd017-c929-4891-9118-95e20ef61238-utilities" (OuterVolumeSpecName: "utilities") pod "c91bd017-c929-4891-9118-95e20ef61238" (UID: "c91bd017-c929-4891-9118-95e20ef61238"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.862713 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c91bd017-c929-4891-9118-95e20ef61238-kube-api-access-bjgbt" (OuterVolumeSpecName: "kube-api-access-bjgbt") pod "c91bd017-c929-4891-9118-95e20ef61238" (UID: "c91bd017-c929-4891-9118-95e20ef61238"). InnerVolumeSpecName "kube-api-access-bjgbt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.910335 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c91bd017-c929-4891-9118-95e20ef61238-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c91bd017-c929-4891-9118-95e20ef61238" (UID: "c91bd017-c929-4891-9118-95e20ef61238"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.955725 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.959930 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c91bd017-c929-4891-9118-95e20ef61238-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.960445 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c91bd017-c929-4891-9118-95e20ef61238-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:58 crc kubenswrapper[4767]: I0128 18:35:58.960628 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjgbt\" (UniqueName: \"kubernetes.io/projected/c91bd017-c929-4891-9118-95e20ef61238-kube-api-access-bjgbt\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.061390 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0ee47b3-df01-4b36-8d19-ead3db6a705d-utilities\") pod \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\" (UID: \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\") " Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.061476 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8v5hl\" (UniqueName: \"kubernetes.io/projected/b0ee47b3-df01-4b36-8d19-ead3db6a705d-kube-api-access-8v5hl\") pod \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\" (UID: \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\") " Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.061515 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0ee47b3-df01-4b36-8d19-ead3db6a705d-catalog-content\") pod \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\" (UID: \"b0ee47b3-df01-4b36-8d19-ead3db6a705d\") " Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.062744 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0ee47b3-df01-4b36-8d19-ead3db6a705d-utilities" (OuterVolumeSpecName: "utilities") pod "b0ee47b3-df01-4b36-8d19-ead3db6a705d" (UID: "b0ee47b3-df01-4b36-8d19-ead3db6a705d"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.065369 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0ee47b3-df01-4b36-8d19-ead3db6a705d-kube-api-access-8v5hl" (OuterVolumeSpecName: "kube-api-access-8v5hl") pod "b0ee47b3-df01-4b36-8d19-ead3db6a705d" (UID: "b0ee47b3-df01-4b36-8d19-ead3db6a705d"). InnerVolumeSpecName "kube-api-access-8v5hl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.112875 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b0ee47b3-df01-4b36-8d19-ead3db6a705d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b0ee47b3-df01-4b36-8d19-ead3db6a705d" (UID: "b0ee47b3-df01-4b36-8d19-ead3db6a705d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.162590 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b0ee47b3-df01-4b36-8d19-ead3db6a705d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.162988 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8v5hl\" (UniqueName: \"kubernetes.io/projected/b0ee47b3-df01-4b36-8d19-ead3db6a705d-kube-api-access-8v5hl\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.163001 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b0ee47b3-df01-4b36-8d19-ead3db6a705d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.345793 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" event={"ID":"da8b07a3-3a51-432a-a78e-b8e598a59a15","Type":"ContainerDied","Data":"111aebbdfd6706f46f4efb0c698f0029f31b02c33a666f2f2e8a05251bba542c"} Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.345876 4767 scope.go:117] "RemoveContainer" containerID="d477fe8d5a4e366e58dd52cb93bdc73e48c2e4a64cecdc654e8d56025f1b60e2" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.346142 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-f49b86d76-bv6fd" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.353708 4767 generic.go:334] "Generic (PLEG): container finished" podID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" containerID="b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175" exitCode=0 Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.353816 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6w58h" event={"ID":"b0ee47b3-df01-4b36-8d19-ead3db6a705d","Type":"ContainerDied","Data":"b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175"} Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.353870 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6w58h" event={"ID":"b0ee47b3-df01-4b36-8d19-ead3db6a705d","Type":"ContainerDied","Data":"b792360462326394fe8d0642cb85ffce6910ede30f7bd7d00e819806a768b842"} Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.353862 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6w58h" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.356853 4767 generic.go:334] "Generic (PLEG): container finished" podID="c91bd017-c929-4891-9118-95e20ef61238" containerID="87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8" exitCode=0 Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.356950 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.356943 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86rk5" event={"ID":"c91bd017-c929-4891-9118-95e20ef61238","Type":"ContainerDied","Data":"87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8"} Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.357060 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86rk5" event={"ID":"c91bd017-c929-4891-9118-95e20ef61238","Type":"ContainerDied","Data":"0894a500d8f40b74ad7f4c66a10b7ec12104b31693e91fab84926d26dc707785"} Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.356950 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-86rk5" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.371686 4767 scope.go:117] "RemoveContainer" containerID="b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.372022 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-f49b86d76-bv6fd"] Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.376309 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-f49b86d76-bv6fd"] Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.393433 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n"] Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.393463 4767 scope.go:117] "RemoveContainer" containerID="f265a8cacb89b37453bbac1f5b5043377e8b536fe961fbd1b293fe59a4cc4c3e" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.395875 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-645cc58796-7w84n"] Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.410613 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-86rk5"] Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.414723 4767 scope.go:117] "RemoveContainer" containerID="dcb587a4bb8875b8cf84c6e4c6f5fbd5d6895f2961c122f737bfa8a637ac7123" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.418180 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-86rk5"] Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.437359 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6w58h"] Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.439656 4767 scope.go:117] "RemoveContainer" containerID="b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175" Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.440117 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175\": container with ID starting with b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175 not found: ID does not exist" containerID="b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.440162 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175"} err="failed to get container status \"b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175\": rpc error: code = NotFound desc = could not find container \"b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175\": container with ID starting with b45f6bcd8d08c9ee4d316898a146dc687b65b4348adcddc39736645091365175 not found: ID does not exist" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.440190 4767 scope.go:117] "RemoveContainer" containerID="f265a8cacb89b37453bbac1f5b5043377e8b536fe961fbd1b293fe59a4cc4c3e" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.440512 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6w58h"] Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.440567 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f265a8cacb89b37453bbac1f5b5043377e8b536fe961fbd1b293fe59a4cc4c3e\": container with ID starting with f265a8cacb89b37453bbac1f5b5043377e8b536fe961fbd1b293fe59a4cc4c3e not found: ID does not exist" containerID="f265a8cacb89b37453bbac1f5b5043377e8b536fe961fbd1b293fe59a4cc4c3e" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.440582 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f265a8cacb89b37453bbac1f5b5043377e8b536fe961fbd1b293fe59a4cc4c3e"} err="failed to get container status \"f265a8cacb89b37453bbac1f5b5043377e8b536fe961fbd1b293fe59a4cc4c3e\": rpc error: code = NotFound desc = could not find container \"f265a8cacb89b37453bbac1f5b5043377e8b536fe961fbd1b293fe59a4cc4c3e\": container with ID starting with f265a8cacb89b37453bbac1f5b5043377e8b536fe961fbd1b293fe59a4cc4c3e not found: ID does not exist" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.440594 4767 scope.go:117] "RemoveContainer" containerID="dcb587a4bb8875b8cf84c6e4c6f5fbd5d6895f2961c122f737bfa8a637ac7123" Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.440911 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dcb587a4bb8875b8cf84c6e4c6f5fbd5d6895f2961c122f737bfa8a637ac7123\": container with ID starting with dcb587a4bb8875b8cf84c6e4c6f5fbd5d6895f2961c122f737bfa8a637ac7123 not found: ID does not exist" containerID="dcb587a4bb8875b8cf84c6e4c6f5fbd5d6895f2961c122f737bfa8a637ac7123" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.440933 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dcb587a4bb8875b8cf84c6e4c6f5fbd5d6895f2961c122f737bfa8a637ac7123"} err="failed to get container status \"dcb587a4bb8875b8cf84c6e4c6f5fbd5d6895f2961c122f737bfa8a637ac7123\": rpc error: code = NotFound desc = could not find container \"dcb587a4bb8875b8cf84c6e4c6f5fbd5d6895f2961c122f737bfa8a637ac7123\": container with ID starting with dcb587a4bb8875b8cf84c6e4c6f5fbd5d6895f2961c122f737bfa8a637ac7123 not found: ID 
does not exist" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.440949 4767 scope.go:117] "RemoveContainer" containerID="87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.455892 4767 scope.go:117] "RemoveContainer" containerID="7294ef78274eee2299c6db1498f7cf25c7f812d76c6873c3a73617b7d7f55bd2" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.470514 4767 scope.go:117] "RemoveContainer" containerID="bdcf1dc2f963e3f638994768d6d49570f12d8b742bd776a486a486861dfdc750" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.489030 4767 scope.go:117] "RemoveContainer" containerID="87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8" Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.491746 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8\": container with ID starting with 87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8 not found: ID does not exist" containerID="87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.491797 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8"} err="failed to get container status \"87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8\": rpc error: code = NotFound desc = could not find container \"87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8\": container with ID starting with 87ca26cc89d1d794971a69f503dab81a31a4319481f881efdc8f7075b62d35b8 not found: ID does not exist" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.491831 4767 scope.go:117] "RemoveContainer" containerID="7294ef78274eee2299c6db1498f7cf25c7f812d76c6873c3a73617b7d7f55bd2" Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.492463 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7294ef78274eee2299c6db1498f7cf25c7f812d76c6873c3a73617b7d7f55bd2\": container with ID starting with 7294ef78274eee2299c6db1498f7cf25c7f812d76c6873c3a73617b7d7f55bd2 not found: ID does not exist" containerID="7294ef78274eee2299c6db1498f7cf25c7f812d76c6873c3a73617b7d7f55bd2" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.492491 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7294ef78274eee2299c6db1498f7cf25c7f812d76c6873c3a73617b7d7f55bd2"} err="failed to get container status \"7294ef78274eee2299c6db1498f7cf25c7f812d76c6873c3a73617b7d7f55bd2\": rpc error: code = NotFound desc = could not find container \"7294ef78274eee2299c6db1498f7cf25c7f812d76c6873c3a73617b7d7f55bd2\": container with ID starting with 7294ef78274eee2299c6db1498f7cf25c7f812d76c6873c3a73617b7d7f55bd2 not found: ID does not exist" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.492510 4767 scope.go:117] "RemoveContainer" containerID="bdcf1dc2f963e3f638994768d6d49570f12d8b742bd776a486a486861dfdc750" Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.494822 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdcf1dc2f963e3f638994768d6d49570f12d8b742bd776a486a486861dfdc750\": container with ID starting with 
bdcf1dc2f963e3f638994768d6d49570f12d8b742bd776a486a486861dfdc750 not found: ID does not exist" containerID="bdcf1dc2f963e3f638994768d6d49570f12d8b742bd776a486a486861dfdc750" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.494857 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdcf1dc2f963e3f638994768d6d49570f12d8b742bd776a486a486861dfdc750"} err="failed to get container status \"bdcf1dc2f963e3f638994768d6d49570f12d8b742bd776a486a486861dfdc750\": rpc error: code = NotFound desc = could not find container \"bdcf1dc2f963e3f638994768d6d49570f12d8b742bd776a486a486861dfdc750\": container with ID starting with bdcf1dc2f963e3f638994768d6d49570f12d8b742bd776a486a486861dfdc750 not found: ID does not exist" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.496970 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl"] Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.497348 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" containerName="extract-content" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.497393 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" containerName="extract-content" Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.497410 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0536729-b321-42d5-9620-e8fc6f2617cd" containerName="route-controller-manager" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.497417 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0536729-b321-42d5-9620-e8fc6f2617cd" containerName="route-controller-manager" Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.497426 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da8b07a3-3a51-432a-a78e-b8e598a59a15" containerName="controller-manager" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.497434 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="da8b07a3-3a51-432a-a78e-b8e598a59a15" containerName="controller-manager" Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.497468 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c91bd017-c929-4891-9118-95e20ef61238" containerName="registry-server" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.497476 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c91bd017-c929-4891-9118-95e20ef61238" containerName="registry-server" Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.497487 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c91bd017-c929-4891-9118-95e20ef61238" containerName="extract-content" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.497495 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c91bd017-c929-4891-9118-95e20ef61238" containerName="extract-content" Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.497506 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" containerName="extract-utilities" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.497512 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" containerName="extract-utilities" Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.497547 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c91bd017-c929-4891-9118-95e20ef61238" 
containerName="extract-utilities" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.497554 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c91bd017-c929-4891-9118-95e20ef61238" containerName="extract-utilities" Jan 28 18:35:59 crc kubenswrapper[4767]: E0128 18:35:59.497562 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" containerName="registry-server" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.497570 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" containerName="registry-server" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.497668 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" containerName="registry-server" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.497678 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="da8b07a3-3a51-432a-a78e-b8e598a59a15" containerName="controller-manager" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.497691 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="c91bd017-c929-4891-9118-95e20ef61238" containerName="registry-server" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.497700 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0536729-b321-42d5-9620-e8fc6f2617cd" containerName="route-controller-manager" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.498863 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.500148 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-769864866-2p6v9"] Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.500788 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.504459 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.504566 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.505147 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.505480 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.505985 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.507023 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.508704 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.516066 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl"] Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.517233 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.518835 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.519527 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.520059 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.520186 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.520725 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.526161 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-769864866-2p6v9"] Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.670753 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-config\") pod \"route-controller-manager-5ff7dfcb68-bnksl\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") " pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.670860 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-r84l2\" (UniqueName: \"kubernetes.io/projected/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-kube-api-access-r84l2\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.670921 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-client-ca\") pod \"route-controller-manager-5ff7dfcb68-bnksl\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") " pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.670971 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-serving-cert\") pod \"route-controller-manager-5ff7dfcb68-bnksl\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") " pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.671319 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-proxy-ca-bundles\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.671546 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-serving-cert\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.671635 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9bml\" (UniqueName: \"kubernetes.io/projected/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-kube-api-access-p9bml\") pod \"route-controller-manager-5ff7dfcb68-bnksl\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") " pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.671669 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-client-ca\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.671696 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-config\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.773087 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-config\") pod \"route-controller-manager-5ff7dfcb68-bnksl\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") " pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.773157 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r84l2\" (UniqueName: \"kubernetes.io/projected/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-kube-api-access-r84l2\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.773200 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-client-ca\") pod \"route-controller-manager-5ff7dfcb68-bnksl\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") " pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.773258 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-serving-cert\") pod \"route-controller-manager-5ff7dfcb68-bnksl\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") " pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.773293 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-proxy-ca-bundles\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.773327 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-serving-cert\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.773358 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9bml\" (UniqueName: \"kubernetes.io/projected/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-kube-api-access-p9bml\") pod \"route-controller-manager-5ff7dfcb68-bnksl\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") " pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.773383 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-client-ca\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.773405 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-config\") pod \"controller-manager-769864866-2p6v9\" 
(UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.777222 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-config\") pod \"route-controller-manager-5ff7dfcb68-bnksl\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") " pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.779425 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-client-ca\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.779674 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-client-ca\") pod \"route-controller-manager-5ff7dfcb68-bnksl\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") " pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.779737 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-config\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.780806 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-proxy-ca-bundles\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.783322 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-serving-cert\") pod \"route-controller-manager-5ff7dfcb68-bnksl\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") " pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.787791 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-serving-cert\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.792003 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p9bml\" (UniqueName: \"kubernetes.io/projected/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-kube-api-access-p9bml\") pod \"route-controller-manager-5ff7dfcb68-bnksl\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") " pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.794620 4767 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-r84l2\" (UniqueName: \"kubernetes.io/projected/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-kube-api-access-r84l2\") pod \"controller-manager-769864866-2p6v9\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.831662 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:35:59 crc kubenswrapper[4767]: I0128 18:35:59.844774 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:36:00 crc kubenswrapper[4767]: I0128 18:36:00.125160 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-769864866-2p6v9"] Jan 28 18:36:00 crc kubenswrapper[4767]: I0128 18:36:00.368231 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-769864866-2p6v9" event={"ID":"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc","Type":"ContainerStarted","Data":"1094152eeba985a453e1e067e800a5409fb77e6d992c378d6b4568cb26b49eec"} Jan 28 18:36:00 crc kubenswrapper[4767]: I0128 18:36:00.368283 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-769864866-2p6v9" event={"ID":"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc","Type":"ContainerStarted","Data":"0751835d3c92cffe5c22117622ae7d97595dc03f27c778c0795a6af5c7d1f77b"} Jan 28 18:36:00 crc kubenswrapper[4767]: I0128 18:36:00.397073 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl"] Jan 28 18:36:00 crc kubenswrapper[4767]: W0128 18:36:00.406273 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ac04423_89fa_4e06_b3f1_a9cf3b840d82.slice/crio-495c6a89cf71e99d53bf60e70e7767be6b455823bb89a756cc4a54b23171d854 WatchSource:0}: Error finding container 495c6a89cf71e99d53bf60e70e7767be6b455823bb89a756cc4a54b23171d854: Status 404 returned error can't find the container with id 495c6a89cf71e99d53bf60e70e7767be6b455823bb89a756cc4a54b23171d854 Jan 28 18:36:00 crc kubenswrapper[4767]: I0128 18:36:00.801608 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0536729-b321-42d5-9620-e8fc6f2617cd" path="/var/lib/kubelet/pods/b0536729-b321-42d5-9620-e8fc6f2617cd/volumes" Jan 28 18:36:00 crc kubenswrapper[4767]: I0128 18:36:00.802288 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0ee47b3-df01-4b36-8d19-ead3db6a705d" path="/var/lib/kubelet/pods/b0ee47b3-df01-4b36-8d19-ead3db6a705d/volumes" Jan 28 18:36:00 crc kubenswrapper[4767]: I0128 18:36:00.802878 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c91bd017-c929-4891-9118-95e20ef61238" path="/var/lib/kubelet/pods/c91bd017-c929-4891-9118-95e20ef61238/volumes" Jan 28 18:36:00 crc kubenswrapper[4767]: I0128 18:36:00.803485 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da8b07a3-3a51-432a-a78e-b8e598a59a15" path="/var/lib/kubelet/pods/da8b07a3-3a51-432a-a78e-b8e598a59a15/volumes" Jan 28 18:36:00 crc kubenswrapper[4767]: I0128 18:36:00.930860 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfc9c"] Jan 28 
18:36:00 crc kubenswrapper[4767]: I0128 18:36:00.931258 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-bfc9c" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" containerName="registry-server" containerID="cri-o://ab4b960f985f27e5759720d30b4623a6661805ceed0e606c66a33e102b37cf94" gracePeriod=2 Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.133983 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-k5t94"] Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.134500 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-k5t94" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" containerName="registry-server" containerID="cri-o://3af210b9d5bf2d5a5b6e53610ae2407a11a993242853daf544c28a7001212a54" gracePeriod=2 Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.377175 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" event={"ID":"5ac04423-89fa-4e06-b3f1-a9cf3b840d82","Type":"ContainerStarted","Data":"4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b"} Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.377342 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" event={"ID":"5ac04423-89fa-4e06-b3f1-a9cf3b840d82","Type":"ContainerStarted","Data":"495c6a89cf71e99d53bf60e70e7767be6b455823bb89a756cc4a54b23171d854"} Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.377376 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.381827 4767 generic.go:334] "Generic (PLEG): container finished" podID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" containerID="3af210b9d5bf2d5a5b6e53610ae2407a11a993242853daf544c28a7001212a54" exitCode=0 Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.381904 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k5t94" event={"ID":"3482059d-fb54-44c1-8fd4-b4ca29e633e4","Type":"ContainerDied","Data":"3af210b9d5bf2d5a5b6e53610ae2407a11a993242853daf544c28a7001212a54"} Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.384559 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.385581 4767 generic.go:334] "Generic (PLEG): container finished" podID="2e90df07-d5ed-42b7-a3d8-de62235b551d" containerID="ab4b960f985f27e5759720d30b4623a6661805ceed0e606c66a33e102b37cf94" exitCode=0 Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.385642 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfc9c" event={"ID":"2e90df07-d5ed-42b7-a3d8-de62235b551d","Type":"ContainerDied","Data":"ab4b960f985f27e5759720d30b4623a6661805ceed0e606c66a33e102b37cf94"} Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.385921 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.391867 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.405408 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" podStartSLOduration=4.405350261 podStartE2EDuration="4.405350261s" podCreationTimestamp="2026-01-28 18:35:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:36:01.404028619 +0000 UTC m=+367.368211503" watchObservedRunningTime="2026-01-28 18:36:01.405350261 +0000 UTC m=+367.369533135" Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.460606 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-769864866-2p6v9" podStartSLOduration=4.460587179 podStartE2EDuration="4.460587179s" podCreationTimestamp="2026-01-28 18:35:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:36:01.453049181 +0000 UTC m=+367.417232055" watchObservedRunningTime="2026-01-28 18:36:01.460587179 +0000 UTC m=+367.424770053" Jan 28 18:36:01 crc kubenswrapper[4767]: I0128 18:36:01.992145 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.119999 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k5t94" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.127173 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6nttm\" (UniqueName: \"kubernetes.io/projected/2e90df07-d5ed-42b7-a3d8-de62235b551d-kube-api-access-6nttm\") pod \"2e90df07-d5ed-42b7-a3d8-de62235b551d\" (UID: \"2e90df07-d5ed-42b7-a3d8-de62235b551d\") " Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.127482 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e90df07-d5ed-42b7-a3d8-de62235b551d-utilities\") pod \"2e90df07-d5ed-42b7-a3d8-de62235b551d\" (UID: \"2e90df07-d5ed-42b7-a3d8-de62235b551d\") " Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.127534 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e90df07-d5ed-42b7-a3d8-de62235b551d-catalog-content\") pod \"2e90df07-d5ed-42b7-a3d8-de62235b551d\" (UID: \"2e90df07-d5ed-42b7-a3d8-de62235b551d\") " Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.129331 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e90df07-d5ed-42b7-a3d8-de62235b551d-utilities" (OuterVolumeSpecName: "utilities") pod "2e90df07-d5ed-42b7-a3d8-de62235b551d" (UID: "2e90df07-d5ed-42b7-a3d8-de62235b551d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.135579 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e90df07-d5ed-42b7-a3d8-de62235b551d-kube-api-access-6nttm" (OuterVolumeSpecName: "kube-api-access-6nttm") pod "2e90df07-d5ed-42b7-a3d8-de62235b551d" (UID: "2e90df07-d5ed-42b7-a3d8-de62235b551d"). 
InnerVolumeSpecName "kube-api-access-6nttm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.171690 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e90df07-d5ed-42b7-a3d8-de62235b551d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2e90df07-d5ed-42b7-a3d8-de62235b551d" (UID: "2e90df07-d5ed-42b7-a3d8-de62235b551d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.229200 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-82vpb\" (UniqueName: \"kubernetes.io/projected/3482059d-fb54-44c1-8fd4-b4ca29e633e4-kube-api-access-82vpb\") pod \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\" (UID: \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\") " Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.229841 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3482059d-fb54-44c1-8fd4-b4ca29e633e4-catalog-content\") pod \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\" (UID: \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\") " Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.229931 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3482059d-fb54-44c1-8fd4-b4ca29e633e4-utilities\") pod \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\" (UID: \"3482059d-fb54-44c1-8fd4-b4ca29e633e4\") " Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.231182 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3482059d-fb54-44c1-8fd4-b4ca29e633e4-utilities" (OuterVolumeSpecName: "utilities") pod "3482059d-fb54-44c1-8fd4-b4ca29e633e4" (UID: "3482059d-fb54-44c1-8fd4-b4ca29e633e4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.231472 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e90df07-d5ed-42b7-a3d8-de62235b551d-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.231526 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e90df07-d5ed-42b7-a3d8-de62235b551d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.231545 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6nttm\" (UniqueName: \"kubernetes.io/projected/2e90df07-d5ed-42b7-a3d8-de62235b551d-kube-api-access-6nttm\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.231559 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3482059d-fb54-44c1-8fd4-b4ca29e633e4-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.233398 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3482059d-fb54-44c1-8fd4-b4ca29e633e4-kube-api-access-82vpb" (OuterVolumeSpecName: "kube-api-access-82vpb") pod "3482059d-fb54-44c1-8fd4-b4ca29e633e4" (UID: "3482059d-fb54-44c1-8fd4-b4ca29e633e4"). InnerVolumeSpecName "kube-api-access-82vpb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.333122 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-82vpb\" (UniqueName: \"kubernetes.io/projected/3482059d-fb54-44c1-8fd4-b4ca29e633e4-kube-api-access-82vpb\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.350667 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3482059d-fb54-44c1-8fd4-b4ca29e633e4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3482059d-fb54-44c1-8fd4-b4ca29e633e4" (UID: "3482059d-fb54-44c1-8fd4-b4ca29e633e4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.397035 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k5t94" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.397792 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k5t94" event={"ID":"3482059d-fb54-44c1-8fd4-b4ca29e633e4","Type":"ContainerDied","Data":"e8f124c04b104347914a03864ccdf425d41a0886ea3299362b441f996bfd3263"} Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.398257 4767 scope.go:117] "RemoveContainer" containerID="3af210b9d5bf2d5a5b6e53610ae2407a11a993242853daf544c28a7001212a54" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.400378 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-bfc9c" event={"ID":"2e90df07-d5ed-42b7-a3d8-de62235b551d","Type":"ContainerDied","Data":"7b4dd733fa0491b0ac8736274b97fa1ed23988192d7445fe981febcfa7e8169f"} Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.400482 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-bfc9c" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.435021 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3482059d-fb54-44c1-8fd4-b4ca29e633e4-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.438548 4767 scope.go:117] "RemoveContainer" containerID="abce2e2a228b450d143edb1f5d104ee5c697c98b65ac8b76f3b735b8d5161483" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.444012 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfc9c"] Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.445433 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-bfc9c"] Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.460720 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-k5t94"] Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.465828 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-k5t94"] Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.467837 4767 scope.go:117] "RemoveContainer" containerID="24b8d1ffb4d3253d79b444fd87a36f35c3a14f0287a0a7d097772358984e63df" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.489518 4767 scope.go:117] "RemoveContainer" containerID="ab4b960f985f27e5759720d30b4623a6661805ceed0e606c66a33e102b37cf94" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.507655 4767 scope.go:117] "RemoveContainer" containerID="c5528194888f5734c9ff748d5785546b4bd4f872f2299117d049fe6783556898" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.529264 4767 scope.go:117] "RemoveContainer" containerID="3c9f535c2187e6cc243264add42717998fbec5665ad90450db5da2b6ea5ae1f9" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.802924 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" path="/var/lib/kubelet/pods/2e90df07-d5ed-42b7-a3d8-de62235b551d/volumes" Jan 28 18:36:02 crc kubenswrapper[4767]: I0128 18:36:02.803559 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" path="/var/lib/kubelet/pods/3482059d-fb54-44c1-8fd4-b4ca29e633e4/volumes" Jan 28 18:36:05 crc kubenswrapper[4767]: I0128 18:36:05.987900 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pbbt4"] Jan 28 18:36:15 crc kubenswrapper[4767]: I0128 18:36:15.455543 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:36:15 crc kubenswrapper[4767]: I0128 18:36:15.455606 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:36:17 crc kubenswrapper[4767]: I0128 18:36:17.765825 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-769864866-2p6v9"] Jan 28 
Jan 28 18:36:17 crc kubenswrapper[4767]: I0128 18:36:17.766598 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-769864866-2p6v9" podUID="7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc" containerName="controller-manager" containerID="cri-o://1094152eeba985a453e1e067e800a5409fb77e6d992c378d6b4568cb26b49eec" gracePeriod=30
Jan 28 18:36:17 crc kubenswrapper[4767]: I0128 18:36:17.861004 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl"]
Jan 28 18:36:17 crc kubenswrapper[4767]: I0128 18:36:17.861260 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" podUID="5ac04423-89fa-4e06-b3f1-a9cf3b840d82" containerName="route-controller-manager" containerID="cri-o://4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b" gracePeriod=30
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.396402 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl"
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.473521 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-config\") pod \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") "
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.473585 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9bml\" (UniqueName: \"kubernetes.io/projected/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-kube-api-access-p9bml\") pod \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") "
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.473672 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-client-ca\") pod \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") "
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.473706 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-serving-cert\") pod \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\" (UID: \"5ac04423-89fa-4e06-b3f1-a9cf3b840d82\") "
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.474511 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-config" (OuterVolumeSpecName: "config") pod "5ac04423-89fa-4e06-b3f1-a9cf3b840d82" (UID: "5ac04423-89fa-4e06-b3f1-a9cf3b840d82"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.475126 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-client-ca" (OuterVolumeSpecName: "client-ca") pod "5ac04423-89fa-4e06-b3f1-a9cf3b840d82" (UID: "5ac04423-89fa-4e06-b3f1-a9cf3b840d82"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.488442 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-kube-api-access-p9bml" (OuterVolumeSpecName: "kube-api-access-p9bml") pod "5ac04423-89fa-4e06-b3f1-a9cf3b840d82" (UID: "5ac04423-89fa-4e06-b3f1-a9cf3b840d82"). InnerVolumeSpecName "kube-api-access-p9bml". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.489343 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5ac04423-89fa-4e06-b3f1-a9cf3b840d82" (UID: "5ac04423-89fa-4e06-b3f1-a9cf3b840d82"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.492915 4767 generic.go:334] "Generic (PLEG): container finished" podID="7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc" containerID="1094152eeba985a453e1e067e800a5409fb77e6d992c378d6b4568cb26b49eec" exitCode=0
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.492968 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-769864866-2p6v9" event={"ID":"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc","Type":"ContainerDied","Data":"1094152eeba985a453e1e067e800a5409fb77e6d992c378d6b4568cb26b49eec"}
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.505032 4767 generic.go:334] "Generic (PLEG): container finished" podID="5ac04423-89fa-4e06-b3f1-a9cf3b840d82" containerID="4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b" exitCode=0
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.505109 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" event={"ID":"5ac04423-89fa-4e06-b3f1-a9cf3b840d82","Type":"ContainerDied","Data":"4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b"}
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.505165 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" event={"ID":"5ac04423-89fa-4e06-b3f1-a9cf3b840d82","Type":"ContainerDied","Data":"495c6a89cf71e99d53bf60e70e7767be6b455823bb89a756cc4a54b23171d854"}
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.505200 4767 scope.go:117] "RemoveContainer" containerID="4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b"
Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.505251 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl"
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.527374 4767 scope.go:117] "RemoveContainer" containerID="4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b" Jan 28 18:36:18 crc kubenswrapper[4767]: E0128 18:36:18.531734 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b\": container with ID starting with 4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b not found: ID does not exist" containerID="4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.531808 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b"} err="failed to get container status \"4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b\": rpc error: code = NotFound desc = could not find container \"4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b\": container with ID starting with 4ad8428f0a99d0f3c7194e870cc3cb086e59e0c1504884528449ac90b9fcd91b not found: ID does not exist" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.539438 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl"] Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.544652 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5ff7dfcb68-bnksl"] Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.575433 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.575470 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.575482 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.575494 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9bml\" (UniqueName: \"kubernetes.io/projected/5ac04423-89fa-4e06-b3f1-a9cf3b840d82-kube-api-access-p9bml\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.817379 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5ac04423-89fa-4e06-b3f1-a9cf3b840d82" path="/var/lib/kubelet/pods/5ac04423-89fa-4e06-b3f1-a9cf3b840d82/volumes" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.938531 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.981029 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r84l2\" (UniqueName: \"kubernetes.io/projected/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-kube-api-access-r84l2\") pod \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.981071 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-proxy-ca-bundles\") pod \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.981354 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-client-ca\") pod \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.981372 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-config\") pod \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.981439 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-serving-cert\") pod \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\" (UID: \"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc\") " Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.982382 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc" (UID: "7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.982563 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-client-ca" (OuterVolumeSpecName: "client-ca") pod "7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc" (UID: "7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.982696 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-config" (OuterVolumeSpecName: "config") pod "7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc" (UID: "7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.987270 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-kube-api-access-r84l2" (OuterVolumeSpecName: "kube-api-access-r84l2") pod "7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc" (UID: "7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc"). InnerVolumeSpecName "kube-api-access-r84l2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:18 crc kubenswrapper[4767]: I0128 18:36:18.994844 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc" (UID: "7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.082899 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.082966 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r84l2\" (UniqueName: \"kubernetes.io/projected/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-kube-api-access-r84l2\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.082980 4767 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.082994 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.083007 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.510817 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-769864866-2p6v9" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.510807 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-769864866-2p6v9" event={"ID":"7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc","Type":"ContainerDied","Data":"0751835d3c92cffe5c22117622ae7d97595dc03f27c778c0795a6af5c7d1f77b"} Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.510996 4767 scope.go:117] "RemoveContainer" containerID="1094152eeba985a453e1e067e800a5409fb77e6d992c378d6b4568cb26b49eec" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.512747 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn"] Jan 28 18:36:19 crc kubenswrapper[4767]: E0128 18:36:19.513088 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" containerName="extract-content" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513117 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" containerName="extract-content" Jan 28 18:36:19 crc kubenswrapper[4767]: E0128 18:36:19.513128 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5ac04423-89fa-4e06-b3f1-a9cf3b840d82" containerName="route-controller-manager" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513136 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="5ac04423-89fa-4e06-b3f1-a9cf3b840d82" containerName="route-controller-manager" Jan 28 18:36:19 crc kubenswrapper[4767]: E0128 18:36:19.513147 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc" containerName="controller-manager" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513154 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc" containerName="controller-manager" Jan 28 18:36:19 crc kubenswrapper[4767]: E0128 18:36:19.513165 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" containerName="extract-utilities" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513221 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" containerName="extract-utilities" Jan 28 18:36:19 crc kubenswrapper[4767]: E0128 18:36:19.513231 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" containerName="registry-server" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513237 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" containerName="registry-server" Jan 28 18:36:19 crc kubenswrapper[4767]: E0128 18:36:19.513242 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" containerName="registry-server" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513248 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" containerName="registry-server" Jan 28 18:36:19 crc kubenswrapper[4767]: E0128 18:36:19.513259 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" containerName="extract-content" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513269 4767 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" containerName="extract-content" Jan 28 18:36:19 crc kubenswrapper[4767]: E0128 18:36:19.513281 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" containerName="extract-utilities" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513290 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" containerName="extract-utilities" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513399 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e90df07-d5ed-42b7-a3d8-de62235b551d" containerName="registry-server" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513410 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="3482059d-fb54-44c1-8fd4-b4ca29e633e4" containerName="registry-server" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513424 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc" containerName="controller-manager" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513437 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="5ac04423-89fa-4e06-b3f1-a9cf3b840d82" containerName="route-controller-manager" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.513880 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.516250 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5446967d5-cmrrc"] Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.516739 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.516847 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.517539 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.517628 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.517715 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.517807 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.518369 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.521398 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.521736 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.521938 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.522080 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.523257 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.524891 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.529482 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.541430 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn"] Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.541488 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5446967d5-cmrrc"] Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.588764 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-serving-cert\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.588829 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-client-ca\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " 
pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.588863 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqggk\" (UniqueName: \"kubernetes.io/projected/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-kube-api-access-rqggk\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.588889 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2bfc3ce4-de3d-4851-a062-836454d70736-client-ca\") pod \"route-controller-manager-9f6f85dd9-6nmnn\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.588949 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjznp\" (UniqueName: \"kubernetes.io/projected/2bfc3ce4-de3d-4851-a062-836454d70736-kube-api-access-rjznp\") pod \"route-controller-manager-9f6f85dd9-6nmnn\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.588978 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-config\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.589006 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-proxy-ca-bundles\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.589028 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2bfc3ce4-de3d-4851-a062-836454d70736-serving-cert\") pod \"route-controller-manager-9f6f85dd9-6nmnn\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.589061 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bfc3ce4-de3d-4851-a062-836454d70736-config\") pod \"route-controller-manager-9f6f85dd9-6nmnn\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.589727 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-769864866-2p6v9"] Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.592774 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-controller-manager/controller-manager-769864866-2p6v9"] Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.690757 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-proxy-ca-bundles\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.690809 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2bfc3ce4-de3d-4851-a062-836454d70736-serving-cert\") pod \"route-controller-manager-9f6f85dd9-6nmnn\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.690855 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bfc3ce4-de3d-4851-a062-836454d70736-config\") pod \"route-controller-manager-9f6f85dd9-6nmnn\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.690877 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-serving-cert\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.690915 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2bfc3ce4-de3d-4851-a062-836454d70736-client-ca\") pod \"route-controller-manager-9f6f85dd9-6nmnn\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.690938 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-client-ca\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.690963 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqggk\" (UniqueName: \"kubernetes.io/projected/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-kube-api-access-rqggk\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.691035 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjznp\" (UniqueName: \"kubernetes.io/projected/2bfc3ce4-de3d-4851-a062-836454d70736-kube-api-access-rjznp\") pod \"route-controller-manager-9f6f85dd9-6nmnn\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.691066 4767 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-config\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.692230 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-proxy-ca-bundles\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.692264 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-client-ca\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.692701 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-config\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.692808 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2bfc3ce4-de3d-4851-a062-836454d70736-client-ca\") pod \"route-controller-manager-9f6f85dd9-6nmnn\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.693098 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bfc3ce4-de3d-4851-a062-836454d70736-config\") pod \"route-controller-manager-9f6f85dd9-6nmnn\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.694299 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2bfc3ce4-de3d-4851-a062-836454d70736-serving-cert\") pod \"route-controller-manager-9f6f85dd9-6nmnn\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.696982 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-serving-cert\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.711715 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjznp\" (UniqueName: \"kubernetes.io/projected/2bfc3ce4-de3d-4851-a062-836454d70736-kube-api-access-rjznp\") pod \"route-controller-manager-9f6f85dd9-6nmnn\" (UID: 
\"2bfc3ce4-de3d-4851-a062-836454d70736\") " pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.712447 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqggk\" (UniqueName: \"kubernetes.io/projected/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-kube-api-access-rqggk\") pod \"controller-manager-5446967d5-cmrrc\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.875651 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:19 crc kubenswrapper[4767]: I0128 18:36:19.885759 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:20 crc kubenswrapper[4767]: I0128 18:36:20.338394 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn"] Jan 28 18:36:20 crc kubenswrapper[4767]: I0128 18:36:20.424900 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5446967d5-cmrrc"] Jan 28 18:36:20 crc kubenswrapper[4767]: I0128 18:36:20.530633 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" event={"ID":"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88","Type":"ContainerStarted","Data":"1c23714507860942fb8854fc55707bc277cbb901204f749f804923cf1ed069ad"} Jan 28 18:36:20 crc kubenswrapper[4767]: I0128 18:36:20.532006 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" event={"ID":"2bfc3ce4-de3d-4851-a062-836454d70736","Type":"ContainerStarted","Data":"6bac94648c30764c680df8907db40d2607349bbd254e5e92ad9f4b3430d0e7cd"} Jan 28 18:36:20 crc kubenswrapper[4767]: I0128 18:36:20.804867 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc" path="/var/lib/kubelet/pods/7b5a966e-45d9-4e4d-a2d6-ee6a70dcffdc/volumes" Jan 28 18:36:21 crc kubenswrapper[4767]: I0128 18:36:21.541644 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" event={"ID":"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88","Type":"ContainerStarted","Data":"311d9fded3e55c4ee06520c9d901821d3e654397cd9d6e847b373d90094304ad"} Jan 28 18:36:21 crc kubenswrapper[4767]: I0128 18:36:21.541935 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:21 crc kubenswrapper[4767]: I0128 18:36:21.543461 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" event={"ID":"2bfc3ce4-de3d-4851-a062-836454d70736","Type":"ContainerStarted","Data":"0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37"} Jan 28 18:36:21 crc kubenswrapper[4767]: I0128 18:36:21.543684 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:21 crc kubenswrapper[4767]: I0128 18:36:21.546458 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:21 crc kubenswrapper[4767]: I0128 18:36:21.548647 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:21 crc kubenswrapper[4767]: I0128 18:36:21.562937 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" podStartSLOduration=4.562921007 podStartE2EDuration="4.562921007s" podCreationTimestamp="2026-01-28 18:36:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:36:21.562901806 +0000 UTC m=+387.527084680" watchObservedRunningTime="2026-01-28 18:36:21.562921007 +0000 UTC m=+387.527103881" Jan 28 18:36:21 crc kubenswrapper[4767]: I0128 18:36:21.599095 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" podStartSLOduration=4.599077135 podStartE2EDuration="4.599077135s" podCreationTimestamp="2026-01-28 18:36:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:36:21.594671647 +0000 UTC m=+387.558854521" watchObservedRunningTime="2026-01-28 18:36:21.599077135 +0000 UTC m=+387.563260009" Jan 28 18:36:31 crc kubenswrapper[4767]: I0128 18:36:31.017076 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" podUID="a5c87c48-f06a-4f35-a336-2d74a88c40ac" containerName="oauth-openshift" containerID="cri-o://7d7ba4d9c16ded0590c50d499150efab9253643be033ab4652e15e47709aa4f1" gracePeriod=15 Jan 28 18:36:31 crc kubenswrapper[4767]: I0128 18:36:31.598995 4767 generic.go:334] "Generic (PLEG): container finished" podID="a5c87c48-f06a-4f35-a336-2d74a88c40ac" containerID="7d7ba4d9c16ded0590c50d499150efab9253643be033ab4652e15e47709aa4f1" exitCode=0 Jan 28 18:36:31 crc kubenswrapper[4767]: I0128 18:36:31.599046 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" event={"ID":"a5c87c48-f06a-4f35-a336-2d74a88c40ac","Type":"ContainerDied","Data":"7d7ba4d9c16ded0590c50d499150efab9253643be033ab4652e15e47709aa4f1"} Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.078045 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.122447 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-866f46767b-52l4d"] Jan 28 18:36:32 crc kubenswrapper[4767]: E0128 18:36:32.123442 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5c87c48-f06a-4f35-a336-2d74a88c40ac" containerName="oauth-openshift" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.123486 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5c87c48-f06a-4f35-a336-2d74a88c40ac" containerName="oauth-openshift" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.123581 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5c87c48-f06a-4f35-a336-2d74a88c40ac" containerName="oauth-openshift" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.123992 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.140184 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-866f46767b-52l4d"] Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161256 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4kth\" (UniqueName: \"kubernetes.io/projected/a5c87c48-f06a-4f35-a336-2d74a88c40ac-kube-api-access-z4kth\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161341 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-cliconfig\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161368 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-error\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161390 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-login\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161440 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-ocp-branding-template\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161479 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-serving-cert\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: 
\"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161498 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-service-ca\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161519 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-idp-0-file-data\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161553 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-session\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161585 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-router-certs\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161620 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a5c87c48-f06a-4f35-a336-2d74a88c40ac-audit-dir\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161638 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-trusted-ca-bundle\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161672 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-audit-policies\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161715 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-provider-selection\") pod \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\" (UID: \"a5c87c48-f06a-4f35-a336-2d74a88c40ac\") " Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161929 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-session\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc 
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161953 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-user-template-login\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.161972 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtmw4\" (UniqueName: \"kubernetes.io/projected/301e1444-f523-4a72-9064-306a63828663-kube-api-access-jtmw4\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.162000 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/301e1444-f523-4a72-9064-306a63828663-audit-dir\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.162025 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-serving-cert\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.162041 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-user-template-error\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.162058 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.162085 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.162115 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-service-ca\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.162138 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-router-certs\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.162236 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.162273 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/301e1444-f523-4a72-9064-306a63828663-audit-policies\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.162290 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-cliconfig\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.162313 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d"
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.164088 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.164905 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.164994 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a5c87c48-f06a-4f35-a336-2d74a88c40ac-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.165200 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.166644 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.172264 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.173017 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5c87c48-f06a-4f35-a336-2d74a88c40ac-kube-api-access-z4kth" (OuterVolumeSpecName: "kube-api-access-z4kth") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "kube-api-access-z4kth". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.173148 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.173907 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.175218 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.175577 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.176460 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.181377 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.181553 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "a5c87c48-f06a-4f35-a336-2d74a88c40ac" (UID: "a5c87c48-f06a-4f35-a336-2d74a88c40ac"). InnerVolumeSpecName "v4-0-config-user-template-login". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263440 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtmw4\" (UniqueName: \"kubernetes.io/projected/301e1444-f523-4a72-9064-306a63828663-kube-api-access-jtmw4\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263493 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-session\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263509 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-user-template-login\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263532 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/301e1444-f523-4a72-9064-306a63828663-audit-dir\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263567 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-user-template-error\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263599 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-serving-cert\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263627 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263660 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " 
pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263700 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-service-ca\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263725 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-router-certs\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263760 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263796 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/301e1444-f523-4a72-9064-306a63828663-audit-policies\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263819 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-cliconfig\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263851 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263906 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4kth\" (UniqueName: \"kubernetes.io/projected/a5c87c48-f06a-4f35-a336-2d74a88c40ac-kube-api-access-z4kth\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263928 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263960 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-error\") on node 
\"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263978 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263996 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.264014 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.264026 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.264035 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.264048 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.264059 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.264068 4767 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a5c87c48-f06a-4f35-a336-2d74a88c40ac-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.264078 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.264087 4767 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a5c87c48-f06a-4f35-a336-2d74a88c40ac-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.264100 4767 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a5c87c48-f06a-4f35-a336-2d74a88c40ac-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.265031 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-service-ca\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.263793 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/301e1444-f523-4a72-9064-306a63828663-audit-dir\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.265582 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.266024 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-cliconfig\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.266134 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/301e1444-f523-4a72-9064-306a63828663-audit-policies\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.268810 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-session\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.269605 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-user-template-login\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.270679 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.271808 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-user-template-error\") pod 
\"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.273230 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.282789 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.285688 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-router-certs\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.285932 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtmw4\" (UniqueName: \"kubernetes.io/projected/301e1444-f523-4a72-9064-306a63828663-kube-api-access-jtmw4\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.286907 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/301e1444-f523-4a72-9064-306a63828663-v4-0-config-system-serving-cert\") pod \"oauth-openshift-866f46767b-52l4d\" (UID: \"301e1444-f523-4a72-9064-306a63828663\") " pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.445190 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.614641 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" event={"ID":"a5c87c48-f06a-4f35-a336-2d74a88c40ac","Type":"ContainerDied","Data":"4dd5861936114db2f87774c10514331c2ecee4dbc785e6e5418f75e6e10e3a77"} Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.614704 4767 scope.go:117] "RemoveContainer" containerID="7d7ba4d9c16ded0590c50d499150efab9253643be033ab4652e15e47709aa4f1" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.614847 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-pbbt4" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.656265 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pbbt4"] Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.661158 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-pbbt4"] Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.805952 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5c87c48-f06a-4f35-a336-2d74a88c40ac" path="/var/lib/kubelet/pods/a5c87c48-f06a-4f35-a336-2d74a88c40ac/volumes" Jan 28 18:36:32 crc kubenswrapper[4767]: I0128 18:36:32.907351 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-866f46767b-52l4d"] Jan 28 18:36:32 crc kubenswrapper[4767]: W0128 18:36:32.923674 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod301e1444_f523_4a72_9064_306a63828663.slice/crio-64d5c4e85a3820e49b6c7b0dc7da44abdc7783e5ddd4f783d6ae1f7ef8d8ad52 WatchSource:0}: Error finding container 64d5c4e85a3820e49b6c7b0dc7da44abdc7783e5ddd4f783d6ae1f7ef8d8ad52: Status 404 returned error can't find the container with id 64d5c4e85a3820e49b6c7b0dc7da44abdc7783e5ddd4f783d6ae1f7ef8d8ad52 Jan 28 18:36:33 crc kubenswrapper[4767]: I0128 18:36:33.625430 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" event={"ID":"301e1444-f523-4a72-9064-306a63828663","Type":"ContainerStarted","Data":"73a45c246ca7022e11ca6d790048dd711f07a324d350a21c0c4bd2a283d91c44"} Jan 28 18:36:33 crc kubenswrapper[4767]: I0128 18:36:33.625683 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" event={"ID":"301e1444-f523-4a72-9064-306a63828663","Type":"ContainerStarted","Data":"64d5c4e85a3820e49b6c7b0dc7da44abdc7783e5ddd4f783d6ae1f7ef8d8ad52"} Jan 28 18:36:33 crc kubenswrapper[4767]: I0128 18:36:33.625711 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:33 crc kubenswrapper[4767]: I0128 18:36:33.656972 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" podStartSLOduration=28.656929547 podStartE2EDuration="28.656929547s" podCreationTimestamp="2026-01-28 18:36:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:36:33.651406466 +0000 UTC m=+399.615589340" watchObservedRunningTime="2026-01-28 18:36:33.656929547 +0000 UTC m=+399.621112411" Jan 28 18:36:33 crc kubenswrapper[4767]: I0128 18:36:33.772241 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-866f46767b-52l4d" Jan 28 18:36:37 crc kubenswrapper[4767]: I0128 18:36:37.794728 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5446967d5-cmrrc"] Jan 28 18:36:37 crc kubenswrapper[4767]: I0128 18:36:37.796247 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" podUID="c3ccc07c-ab09-4b4c-acfb-c9209ba18d88" 
containerName="controller-manager" containerID="cri-o://311d9fded3e55c4ee06520c9d901821d3e654397cd9d6e847b373d90094304ad" gracePeriod=30 Jan 28 18:36:37 crc kubenswrapper[4767]: I0128 18:36:37.836871 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn"] Jan 28 18:36:37 crc kubenswrapper[4767]: I0128 18:36:37.837131 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" podUID="2bfc3ce4-de3d-4851-a062-836454d70736" containerName="route-controller-manager" containerID="cri-o://0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37" gracePeriod=30 Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.329259 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.458177 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2bfc3ce4-de3d-4851-a062-836454d70736-client-ca\") pod \"2bfc3ce4-de3d-4851-a062-836454d70736\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.458263 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2bfc3ce4-de3d-4851-a062-836454d70736-serving-cert\") pod \"2bfc3ce4-de3d-4851-a062-836454d70736\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.458322 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bfc3ce4-de3d-4851-a062-836454d70736-config\") pod \"2bfc3ce4-de3d-4851-a062-836454d70736\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.458364 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjznp\" (UniqueName: \"kubernetes.io/projected/2bfc3ce4-de3d-4851-a062-836454d70736-kube-api-access-rjznp\") pod \"2bfc3ce4-de3d-4851-a062-836454d70736\" (UID: \"2bfc3ce4-de3d-4851-a062-836454d70736\") " Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.459640 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2bfc3ce4-de3d-4851-a062-836454d70736-config" (OuterVolumeSpecName: "config") pod "2bfc3ce4-de3d-4851-a062-836454d70736" (UID: "2bfc3ce4-de3d-4851-a062-836454d70736"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.460076 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2bfc3ce4-de3d-4851-a062-836454d70736-client-ca" (OuterVolumeSpecName: "client-ca") pod "2bfc3ce4-de3d-4851-a062-836454d70736" (UID: "2bfc3ce4-de3d-4851-a062-836454d70736"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.467238 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bfc3ce4-de3d-4851-a062-836454d70736-kube-api-access-rjznp" (OuterVolumeSpecName: "kube-api-access-rjznp") pod "2bfc3ce4-de3d-4851-a062-836454d70736" (UID: "2bfc3ce4-de3d-4851-a062-836454d70736"). InnerVolumeSpecName "kube-api-access-rjznp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.475232 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2bfc3ce4-de3d-4851-a062-836454d70736-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "2bfc3ce4-de3d-4851-a062-836454d70736" (UID: "2bfc3ce4-de3d-4851-a062-836454d70736"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.561174 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjznp\" (UniqueName: \"kubernetes.io/projected/2bfc3ce4-de3d-4851-a062-836454d70736-kube-api-access-rjznp\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.561255 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2bfc3ce4-de3d-4851-a062-836454d70736-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.561270 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2bfc3ce4-de3d-4851-a062-836454d70736-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.561281 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2bfc3ce4-de3d-4851-a062-836454d70736-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.657728 4767 generic.go:334] "Generic (PLEG): container finished" podID="c3ccc07c-ab09-4b4c-acfb-c9209ba18d88" containerID="311d9fded3e55c4ee06520c9d901821d3e654397cd9d6e847b373d90094304ad" exitCode=0 Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.657882 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" event={"ID":"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88","Type":"ContainerDied","Data":"311d9fded3e55c4ee06520c9d901821d3e654397cd9d6e847b373d90094304ad"} Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.659687 4767 generic.go:334] "Generic (PLEG): container finished" podID="2bfc3ce4-de3d-4851-a062-836454d70736" containerID="0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37" exitCode=0 Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.659714 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" event={"ID":"2bfc3ce4-de3d-4851-a062-836454d70736","Type":"ContainerDied","Data":"0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37"} Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.659731 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" event={"ID":"2bfc3ce4-de3d-4851-a062-836454d70736","Type":"ContainerDied","Data":"6bac94648c30764c680df8907db40d2607349bbd254e5e92ad9f4b3430d0e7cd"} Jan 
28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.659751 4767 scope.go:117] "RemoveContainer" containerID="0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.659918 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.697347 4767 scope.go:117] "RemoveContainer" containerID="0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37" Jan 28 18:36:38 crc kubenswrapper[4767]: E0128 18:36:38.698477 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37\": container with ID starting with 0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37 not found: ID does not exist" containerID="0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.698523 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37"} err="failed to get container status \"0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37\": rpc error: code = NotFound desc = could not find container \"0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37\": container with ID starting with 0ce7d5a92f2d80e323bcb684f06baae14cbb4e1a60d87521e75b238d588acc37 not found: ID does not exist" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.703556 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn"] Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.708159 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-9f6f85dd9-6nmnn"] Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.804580 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bfc3ce4-de3d-4851-a062-836454d70736" path="/var/lib/kubelet/pods/2bfc3ce4-de3d-4851-a062-836454d70736/volumes" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.862559 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.969870 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqggk\" (UniqueName: \"kubernetes.io/projected/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-kube-api-access-rqggk\") pod \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.969998 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-client-ca\") pod \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.970128 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-config\") pod \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.970271 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-proxy-ca-bundles\") pod \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.970317 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-serving-cert\") pod \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\" (UID: \"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88\") " Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.971171 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-client-ca" (OuterVolumeSpecName: "client-ca") pod "c3ccc07c-ab09-4b4c-acfb-c9209ba18d88" (UID: "c3ccc07c-ab09-4b4c-acfb-c9209ba18d88"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.971425 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c3ccc07c-ab09-4b4c-acfb-c9209ba18d88" (UID: "c3ccc07c-ab09-4b4c-acfb-c9209ba18d88"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.971452 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-config" (OuterVolumeSpecName: "config") pod "c3ccc07c-ab09-4b4c-acfb-c9209ba18d88" (UID: "c3ccc07c-ab09-4b4c-acfb-c9209ba18d88"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.973965 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c3ccc07c-ab09-4b4c-acfb-c9209ba18d88" (UID: "c3ccc07c-ab09-4b4c-acfb-c9209ba18d88"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:38 crc kubenswrapper[4767]: I0128 18:36:38.974577 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-kube-api-access-rqggk" (OuterVolumeSpecName: "kube-api-access-rqggk") pod "c3ccc07c-ab09-4b4c-acfb-c9209ba18d88" (UID: "c3ccc07c-ab09-4b4c-acfb-c9209ba18d88"). InnerVolumeSpecName "kube-api-access-rqggk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.071722 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.071762 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.071774 4767 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.071785 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.071794 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqggk\" (UniqueName: \"kubernetes.io/projected/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88-kube-api-access-rqggk\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.530512 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb"] Jan 28 18:36:39 crc kubenswrapper[4767]: E0128 18:36:39.531420 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bfc3ce4-de3d-4851-a062-836454d70736" containerName="route-controller-manager" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.531555 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bfc3ce4-de3d-4851-a062-836454d70736" containerName="route-controller-manager" Jan 28 18:36:39 crc kubenswrapper[4767]: E0128 18:36:39.531654 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3ccc07c-ab09-4b4c-acfb-c9209ba18d88" containerName="controller-manager" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.531728 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3ccc07c-ab09-4b4c-acfb-c9209ba18d88" containerName="controller-manager" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.531930 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bfc3ce4-de3d-4851-a062-836454d70736" containerName="route-controller-manager" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.532029 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3ccc07c-ab09-4b4c-acfb-c9209ba18d88" containerName="controller-manager" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.532604 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.533951 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4"] Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.534960 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.537861 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.538878 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.537926 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.539090 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.539094 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.540365 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.542201 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4"] Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.546258 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb"] Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.576363 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/562357a2-03ca-406d-88e5-beb8f2a07d6f-serving-cert\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.578558 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-serving-cert\") pod \"route-controller-manager-54fd9848c7-pd5mb\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.578598 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-proxy-ca-bundles\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.578643 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" 
(UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-client-ca\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.578678 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-config\") pod \"route-controller-manager-54fd9848c7-pd5mb\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.578706 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwn22\" (UniqueName: \"kubernetes.io/projected/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-kube-api-access-nwn22\") pod \"route-controller-manager-54fd9848c7-pd5mb\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.578789 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-config\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.578860 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-client-ca\") pod \"route-controller-manager-54fd9848c7-pd5mb\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.578890 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krlnr\" (UniqueName: \"kubernetes.io/projected/562357a2-03ca-406d-88e5-beb8f2a07d6f-kube-api-access-krlnr\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.667648 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" event={"ID":"c3ccc07c-ab09-4b4c-acfb-c9209ba18d88","Type":"ContainerDied","Data":"1c23714507860942fb8854fc55707bc277cbb901204f749f804923cf1ed069ad"} Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.667714 4767 scope.go:117] "RemoveContainer" containerID="311d9fded3e55c4ee06520c9d901821d3e654397cd9d6e847b373d90094304ad" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.667753 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5446967d5-cmrrc" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.680600 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-client-ca\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.680688 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-config\") pod \"route-controller-manager-54fd9848c7-pd5mb\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.680717 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwn22\" (UniqueName: \"kubernetes.io/projected/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-kube-api-access-nwn22\") pod \"route-controller-manager-54fd9848c7-pd5mb\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.680753 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-config\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.680792 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-client-ca\") pod \"route-controller-manager-54fd9848c7-pd5mb\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.680832 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krlnr\" (UniqueName: \"kubernetes.io/projected/562357a2-03ca-406d-88e5-beb8f2a07d6f-kube-api-access-krlnr\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.680859 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/562357a2-03ca-406d-88e5-beb8f2a07d6f-serving-cert\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.680890 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-serving-cert\") pod \"route-controller-manager-54fd9848c7-pd5mb\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 
18:36:39.680927 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-proxy-ca-bundles\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.682007 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-config\") pod \"route-controller-manager-54fd9848c7-pd5mb\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.682033 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-client-ca\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.682271 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-client-ca\") pod \"route-controller-manager-54fd9848c7-pd5mb\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.682327 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-proxy-ca-bundles\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.684070 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-config\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.687973 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/562357a2-03ca-406d-88e5-beb8f2a07d6f-serving-cert\") pod \"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.688462 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-serving-cert\") pod \"route-controller-manager-54fd9848c7-pd5mb\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.700940 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krlnr\" (UniqueName: \"kubernetes.io/projected/562357a2-03ca-406d-88e5-beb8f2a07d6f-kube-api-access-krlnr\") pod 
\"controller-manager-78d4c9cd75-b6nt4\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.701818 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwn22\" (UniqueName: \"kubernetes.io/projected/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-kube-api-access-nwn22\") pod \"route-controller-manager-54fd9848c7-pd5mb\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.751239 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-5446967d5-cmrrc"] Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.754470 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-5446967d5-cmrrc"] Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.864668 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:39 crc kubenswrapper[4767]: I0128 18:36:39.875706 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.315152 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb"] Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.371631 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4"] Jan 28 18:36:40 crc kubenswrapper[4767]: W0128 18:36:40.379844 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod562357a2_03ca_406d_88e5_beb8f2a07d6f.slice/crio-883f702b983eec63b2c3f82d53f877887b5f1ba1bb959d5174572b75057e20bf WatchSource:0}: Error finding container 883f702b983eec63b2c3f82d53f877887b5f1ba1bb959d5174572b75057e20bf: Status 404 returned error can't find the container with id 883f702b983eec63b2c3f82d53f877887b5f1ba1bb959d5174572b75057e20bf Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.675684 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" event={"ID":"562357a2-03ca-406d-88e5-beb8f2a07d6f","Type":"ContainerStarted","Data":"a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262"} Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.675729 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" event={"ID":"562357a2-03ca-406d-88e5-beb8f2a07d6f","Type":"ContainerStarted","Data":"883f702b983eec63b2c3f82d53f877887b5f1ba1bb959d5174572b75057e20bf"} Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.676149 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.678231 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" 
event={"ID":"ded3d79e-37f8-4d76-9d0c-8590c1c6668d","Type":"ContainerStarted","Data":"50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b"} Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.678316 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" event={"ID":"ded3d79e-37f8-4d76-9d0c-8590c1c6668d","Type":"ContainerStarted","Data":"ec8c368b523d62d8a65c2b11e4a73e191b085fe809d19ce5620d0c1a56866f2d"} Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.678940 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.698527 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.698929 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" podStartSLOduration=3.698901737 podStartE2EDuration="3.698901737s" podCreationTimestamp="2026-01-28 18:36:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:36:40.698449623 +0000 UTC m=+406.662632497" watchObservedRunningTime="2026-01-28 18:36:40.698901737 +0000 UTC m=+406.663084611" Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.748576 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" podStartSLOduration=3.748544579 podStartE2EDuration="3.748544579s" podCreationTimestamp="2026-01-28 18:36:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:36:40.740638636 +0000 UTC m=+406.704821520" watchObservedRunningTime="2026-01-28 18:36:40.748544579 +0000 UTC m=+406.712727453" Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.803370 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3ccc07c-ab09-4b4c-acfb-c9209ba18d88" path="/var/lib/kubelet/pods/c3ccc07c-ab09-4b4c-acfb-c9209ba18d88/volumes" Jan 28 18:36:40 crc kubenswrapper[4767]: I0128 18:36:40.830146 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:45 crc kubenswrapper[4767]: I0128 18:36:45.460341 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:36:45 crc kubenswrapper[4767]: I0128 18:36:45.461025 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.837008 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gft5f"] Jan 28 18:36:47 crc 
kubenswrapper[4767]: I0128 18:36:47.838271 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-gft5f" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" containerName="registry-server" containerID="cri-o://7f9ecf1abf37507d1245723219e7a04a604631a258966f076ba2ff0dcdded938" gracePeriod=30 Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.854789 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4lhmz"] Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.855068 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4lhmz" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" containerName="registry-server" containerID="cri-o://c63fbac1803dff0e8269dd74bb8c36afbc0863bdf1249d244fc4e1c380fedd94" gracePeriod=30 Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.859355 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gxsvp"] Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.859596 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerName="marketplace-operator" containerID="cri-o://81b671827e3ee70774b7ba248bcd90381b82178dc2057126e88ff50973d47339" gracePeriod=30 Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.874847 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qcqfz"] Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.875112 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qcqfz" podUID="eff110e4-7a33-4782-86e1-efff7c646e6f" containerName="registry-server" containerID="cri-o://78c683f03e61ce4c8ade51936d01fcc138f7aaf94c69a75adb014d646d82705e" gracePeriod=30 Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.886571 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ns2sx"] Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.886837 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ns2sx" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" containerName="registry-server" containerID="cri-o://eb0d1fcb7ddc7bad27ea096801c2acc3a5fccc1bcb1f584bed5928929edd6d3d" gracePeriod=30 Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.891649 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-6nztk"] Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.892750 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.913277 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-6nztk"] Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.988530 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bead226a-cfb9-45a4-b4ec-0c910a29c78a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-6nztk\" (UID: \"bead226a-cfb9-45a4-b4ec-0c910a29c78a\") " pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.988615 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8r8jl\" (UniqueName: \"kubernetes.io/projected/bead226a-cfb9-45a4-b4ec-0c910a29c78a-kube-api-access-8r8jl\") pod \"marketplace-operator-79b997595-6nztk\" (UID: \"bead226a-cfb9-45a4-b4ec-0c910a29c78a\") " pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:47 crc kubenswrapper[4767]: I0128 18:36:47.988654 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/bead226a-cfb9-45a4-b4ec-0c910a29c78a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-6nztk\" (UID: \"bead226a-cfb9-45a4-b4ec-0c910a29c78a\") " pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.090229 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/bead226a-cfb9-45a4-b4ec-0c910a29c78a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-6nztk\" (UID: \"bead226a-cfb9-45a4-b4ec-0c910a29c78a\") " pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.090288 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bead226a-cfb9-45a4-b4ec-0c910a29c78a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-6nztk\" (UID: \"bead226a-cfb9-45a4-b4ec-0c910a29c78a\") " pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.090348 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8r8jl\" (UniqueName: \"kubernetes.io/projected/bead226a-cfb9-45a4-b4ec-0c910a29c78a-kube-api-access-8r8jl\") pod \"marketplace-operator-79b997595-6nztk\" (UID: \"bead226a-cfb9-45a4-b4ec-0c910a29c78a\") " pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.091744 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bead226a-cfb9-45a4-b4ec-0c910a29c78a-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-6nztk\" (UID: \"bead226a-cfb9-45a4-b4ec-0c910a29c78a\") " pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.095780 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/bead226a-cfb9-45a4-b4ec-0c910a29c78a-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-6nztk\" (UID: \"bead226a-cfb9-45a4-b4ec-0c910a29c78a\") " pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.106769 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8r8jl\" (UniqueName: \"kubernetes.io/projected/bead226a-cfb9-45a4-b4ec-0c910a29c78a-kube-api-access-8r8jl\") pod \"marketplace-operator-79b997595-6nztk\" (UID: \"bead226a-cfb9-45a4-b4ec-0c910a29c78a\") " pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.215460 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.601433 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-6nztk"] Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.743412 4767 generic.go:334] "Generic (PLEG): container finished" podID="daa183e4-f49d-4f7d-9f9b-66e42f869297" containerID="7f9ecf1abf37507d1245723219e7a04a604631a258966f076ba2ff0dcdded938" exitCode=0 Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.743474 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gft5f" event={"ID":"daa183e4-f49d-4f7d-9f9b-66e42f869297","Type":"ContainerDied","Data":"7f9ecf1abf37507d1245723219e7a04a604631a258966f076ba2ff0dcdded938"} Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.745772 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-gxsvp_b57f864b-8ab2-499a-a47e-b4a4c62842e7/marketplace-operator/1.log" Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.745818 4767 generic.go:334] "Generic (PLEG): container finished" podID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerID="81b671827e3ee70774b7ba248bcd90381b82178dc2057126e88ff50973d47339" exitCode=0 Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.745876 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" event={"ID":"b57f864b-8ab2-499a-a47e-b4a4c62842e7","Type":"ContainerDied","Data":"81b671827e3ee70774b7ba248bcd90381b82178dc2057126e88ff50973d47339"} Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.745907 4767 scope.go:117] "RemoveContainer" containerID="8ba8203a908a2daf68edc5dec05811ae867d067dfcda31989ef6475b995f77fc" Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.748724 4767 generic.go:334] "Generic (PLEG): container finished" podID="eff110e4-7a33-4782-86e1-efff7c646e6f" containerID="78c683f03e61ce4c8ade51936d01fcc138f7aaf94c69a75adb014d646d82705e" exitCode=0 Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.748794 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qcqfz" event={"ID":"eff110e4-7a33-4782-86e1-efff7c646e6f","Type":"ContainerDied","Data":"78c683f03e61ce4c8ade51936d01fcc138f7aaf94c69a75adb014d646d82705e"} Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.750437 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" 
event={"ID":"bead226a-cfb9-45a4-b4ec-0c910a29c78a","Type":"ContainerStarted","Data":"5ec634e3e0171a01e2221cd05a06049f3609c7f8813aa38ab76c4aa74b14d588"} Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.753685 4767 generic.go:334] "Generic (PLEG): container finished" podID="2d04606e-e735-4d65-b208-b39f04aa1630" containerID="eb0d1fcb7ddc7bad27ea096801c2acc3a5fccc1bcb1f584bed5928929edd6d3d" exitCode=0 Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.753809 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ns2sx" event={"ID":"2d04606e-e735-4d65-b208-b39f04aa1630","Type":"ContainerDied","Data":"eb0d1fcb7ddc7bad27ea096801c2acc3a5fccc1bcb1f584bed5928929edd6d3d"} Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.756832 4767 generic.go:334] "Generic (PLEG): container finished" podID="8d36dd2c-a6e9-4369-aae5-c657695233a5" containerID="c63fbac1803dff0e8269dd74bb8c36afbc0863bdf1249d244fc4e1c380fedd94" exitCode=0 Jan 28 18:36:48 crc kubenswrapper[4767]: I0128 18:36:48.756883 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lhmz" event={"ID":"8d36dd2c-a6e9-4369-aae5-c657695233a5","Type":"ContainerDied","Data":"c63fbac1803dff0e8269dd74bb8c36afbc0863bdf1249d244fc4e1c380fedd94"} Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.392864 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.398813 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.514225 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tx7zn\" (UniqueName: \"kubernetes.io/projected/daa183e4-f49d-4f7d-9f9b-66e42f869297-kube-api-access-tx7zn\") pod \"daa183e4-f49d-4f7d-9f9b-66e42f869297\" (UID: \"daa183e4-f49d-4f7d-9f9b-66e42f869297\") " Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.514651 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkq77\" (UniqueName: \"kubernetes.io/projected/8d36dd2c-a6e9-4369-aae5-c657695233a5-kube-api-access-nkq77\") pod \"8d36dd2c-a6e9-4369-aae5-c657695233a5\" (UID: \"8d36dd2c-a6e9-4369-aae5-c657695233a5\") " Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.514683 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d36dd2c-a6e9-4369-aae5-c657695233a5-catalog-content\") pod \"8d36dd2c-a6e9-4369-aae5-c657695233a5\" (UID: \"8d36dd2c-a6e9-4369-aae5-c657695233a5\") " Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.514730 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daa183e4-f49d-4f7d-9f9b-66e42f869297-utilities\") pod \"daa183e4-f49d-4f7d-9f9b-66e42f869297\" (UID: \"daa183e4-f49d-4f7d-9f9b-66e42f869297\") " Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.514795 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d36dd2c-a6e9-4369-aae5-c657695233a5-utilities\") pod \"8d36dd2c-a6e9-4369-aae5-c657695233a5\" (UID: \"8d36dd2c-a6e9-4369-aae5-c657695233a5\") " Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.514848 4767 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daa183e4-f49d-4f7d-9f9b-66e42f869297-catalog-content\") pod \"daa183e4-f49d-4f7d-9f9b-66e42f869297\" (UID: \"daa183e4-f49d-4f7d-9f9b-66e42f869297\") " Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.516918 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d36dd2c-a6e9-4369-aae5-c657695233a5-utilities" (OuterVolumeSpecName: "utilities") pod "8d36dd2c-a6e9-4369-aae5-c657695233a5" (UID: "8d36dd2c-a6e9-4369-aae5-c657695233a5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.517010 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/daa183e4-f49d-4f7d-9f9b-66e42f869297-utilities" (OuterVolumeSpecName: "utilities") pod "daa183e4-f49d-4f7d-9f9b-66e42f869297" (UID: "daa183e4-f49d-4f7d-9f9b-66e42f869297"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.528077 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/daa183e4-f49d-4f7d-9f9b-66e42f869297-kube-api-access-tx7zn" (OuterVolumeSpecName: "kube-api-access-tx7zn") pod "daa183e4-f49d-4f7d-9f9b-66e42f869297" (UID: "daa183e4-f49d-4f7d-9f9b-66e42f869297"). InnerVolumeSpecName "kube-api-access-tx7zn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.529066 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8d36dd2c-a6e9-4369-aae5-c657695233a5-kube-api-access-nkq77" (OuterVolumeSpecName: "kube-api-access-nkq77") pod "8d36dd2c-a6e9-4369-aae5-c657695233a5" (UID: "8d36dd2c-a6e9-4369-aae5-c657695233a5"). InnerVolumeSpecName "kube-api-access-nkq77". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.591180 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8d36dd2c-a6e9-4369-aae5-c657695233a5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8d36dd2c-a6e9-4369-aae5-c657695233a5" (UID: "8d36dd2c-a6e9-4369-aae5-c657695233a5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.597560 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/daa183e4-f49d-4f7d-9f9b-66e42f869297-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "daa183e4-f49d-4f7d-9f9b-66e42f869297" (UID: "daa183e4-f49d-4f7d-9f9b-66e42f869297"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.616324 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/daa183e4-f49d-4f7d-9f9b-66e42f869297-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.616370 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tx7zn\" (UniqueName: \"kubernetes.io/projected/daa183e4-f49d-4f7d-9f9b-66e42f869297-kube-api-access-tx7zn\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.616384 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkq77\" (UniqueName: \"kubernetes.io/projected/8d36dd2c-a6e9-4369-aae5-c657695233a5-kube-api-access-nkq77\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.616393 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8d36dd2c-a6e9-4369-aae5-c657695233a5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.616403 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/daa183e4-f49d-4f7d-9f9b-66e42f869297-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.616411 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8d36dd2c-a6e9-4369-aae5-c657695233a5-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.704930 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.763841 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" event={"ID":"bead226a-cfb9-45a4-b4ec-0c910a29c78a","Type":"ContainerStarted","Data":"9426fdbd597920c3d8ac7fefa24a3f66fea8612f6905d32cd33ba44e6a1ad25b"} Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.765906 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4lhmz" event={"ID":"8d36dd2c-a6e9-4369-aae5-c657695233a5","Type":"ContainerDied","Data":"041f357abbc780a02f0e4641f869fc961dbb7197979b7ce3a58f1cf5d4996ad9"} Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.765929 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4lhmz" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.765982 4767 scope.go:117] "RemoveContainer" containerID="c63fbac1803dff0e8269dd74bb8c36afbc0863bdf1249d244fc4e1c380fedd94" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.769342 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-gft5f" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.769339 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-gft5f" event={"ID":"daa183e4-f49d-4f7d-9f9b-66e42f869297","Type":"ContainerDied","Data":"a9f1dc63e36e9c185de8451a5fbbf9c0053998a81ad5fa53f7ab6e0144f2568b"} Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.773288 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" event={"ID":"b57f864b-8ab2-499a-a47e-b4a4c62842e7","Type":"ContainerDied","Data":"df375098e14acb6e947cc1da2ce4705c5e16f79c0372d8d3185e2c36a839bed2"} Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.773367 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-gxsvp" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.786905 4767 scope.go:117] "RemoveContainer" containerID="d2c3c4c294e1f04ffbbb5488a7c5de85b971fc91ff8105e19d696958350c523c" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.791191 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" podStartSLOduration=2.791167478 podStartE2EDuration="2.791167478s" podCreationTimestamp="2026-01-28 18:36:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:36:49.778419985 +0000 UTC m=+415.742602869" watchObservedRunningTime="2026-01-28 18:36:49.791167478 +0000 UTC m=+415.755350362" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.822001 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b57f864b-8ab2-499a-a47e-b4a4c62842e7-marketplace-operator-metrics\") pod \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\" (UID: \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\") " Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.822069 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b57f864b-8ab2-499a-a47e-b4a4c62842e7-marketplace-trusted-ca\") pod \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\" (UID: \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\") " Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.822105 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ss686\" (UniqueName: \"kubernetes.io/projected/b57f864b-8ab2-499a-a47e-b4a4c62842e7-kube-api-access-ss686\") pod \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\" (UID: \"b57f864b-8ab2-499a-a47e-b4a4c62842e7\") " Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.826283 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-gft5f"] Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.827995 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b57f864b-8ab2-499a-a47e-b4a4c62842e7-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b57f864b-8ab2-499a-a47e-b4a4c62842e7" (UID: "b57f864b-8ab2-499a-a47e-b4a4c62842e7"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.834050 4767 scope.go:117] "RemoveContainer" containerID="13dfcd72212b62ec61c83184aa94965f05cfeff96d2690fdc622ee1bf63c2289" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.856777 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b57f864b-8ab2-499a-a47e-b4a4c62842e7-kube-api-access-ss686" (OuterVolumeSpecName: "kube-api-access-ss686") pod "b57f864b-8ab2-499a-a47e-b4a4c62842e7" (UID: "b57f864b-8ab2-499a-a47e-b4a4c62842e7"). InnerVolumeSpecName "kube-api-access-ss686". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.863260 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b57f864b-8ab2-499a-a47e-b4a4c62842e7-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b57f864b-8ab2-499a-a47e-b4a4c62842e7" (UID: "b57f864b-8ab2-499a-a47e-b4a4c62842e7"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.865697 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-gft5f"] Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.872549 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4lhmz"] Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.878585 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4lhmz"] Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.900266 4767 scope.go:117] "RemoveContainer" containerID="7f9ecf1abf37507d1245723219e7a04a604631a258966f076ba2ff0dcdded938" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.917087 4767 scope.go:117] "RemoveContainer" containerID="611188ca123c9d9653f7d7addf530286cc5e5a1ba26a1dd06f33a6ac4dc49bf8" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.931031 4767 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b57f864b-8ab2-499a-a47e-b4a4c62842e7-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.931070 4767 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b57f864b-8ab2-499a-a47e-b4a4c62842e7-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.931519 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ss686\" (UniqueName: \"kubernetes.io/projected/b57f864b-8ab2-499a-a47e-b4a4c62842e7-kube-api-access-ss686\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.940121 4767 scope.go:117] "RemoveContainer" containerID="85d8324ce62e23eb0ed1790e5a1d65bb4ed5c7276d129edf664cdf2fd7a36ed0" Jan 28 18:36:49 crc kubenswrapper[4767]: I0128 18:36:49.959571 4767 scope.go:117] "RemoveContainer" containerID="81b671827e3ee70774b7ba248bcd90381b82178dc2057126e88ff50973d47339" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.124668 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ns2sx" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.130514 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.137085 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gxsvp"] Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.140391 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-gxsvp"] Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.233656 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eff110e4-7a33-4782-86e1-efff7c646e6f-catalog-content\") pod \"eff110e4-7a33-4782-86e1-efff7c646e6f\" (UID: \"eff110e4-7a33-4782-86e1-efff7c646e6f\") " Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.233732 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d04606e-e735-4d65-b208-b39f04aa1630-utilities\") pod \"2d04606e-e735-4d65-b208-b39f04aa1630\" (UID: \"2d04606e-e735-4d65-b208-b39f04aa1630\") " Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.233800 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5g7k\" (UniqueName: \"kubernetes.io/projected/eff110e4-7a33-4782-86e1-efff7c646e6f-kube-api-access-h5g7k\") pod \"eff110e4-7a33-4782-86e1-efff7c646e6f\" (UID: \"eff110e4-7a33-4782-86e1-efff7c646e6f\") " Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.233840 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qzxlx\" (UniqueName: \"kubernetes.io/projected/2d04606e-e735-4d65-b208-b39f04aa1630-kube-api-access-qzxlx\") pod \"2d04606e-e735-4d65-b208-b39f04aa1630\" (UID: \"2d04606e-e735-4d65-b208-b39f04aa1630\") " Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.233882 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eff110e4-7a33-4782-86e1-efff7c646e6f-utilities\") pod \"eff110e4-7a33-4782-86e1-efff7c646e6f\" (UID: \"eff110e4-7a33-4782-86e1-efff7c646e6f\") " Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.233901 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d04606e-e735-4d65-b208-b39f04aa1630-catalog-content\") pod \"2d04606e-e735-4d65-b208-b39f04aa1630\" (UID: \"2d04606e-e735-4d65-b208-b39f04aa1630\") " Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.235519 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eff110e4-7a33-4782-86e1-efff7c646e6f-utilities" (OuterVolumeSpecName: "utilities") pod "eff110e4-7a33-4782-86e1-efff7c646e6f" (UID: "eff110e4-7a33-4782-86e1-efff7c646e6f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.235623 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d04606e-e735-4d65-b208-b39f04aa1630-utilities" (OuterVolumeSpecName: "utilities") pod "2d04606e-e735-4d65-b208-b39f04aa1630" (UID: "2d04606e-e735-4d65-b208-b39f04aa1630"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.239897 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eff110e4-7a33-4782-86e1-efff7c646e6f-kube-api-access-h5g7k" (OuterVolumeSpecName: "kube-api-access-h5g7k") pod "eff110e4-7a33-4782-86e1-efff7c646e6f" (UID: "eff110e4-7a33-4782-86e1-efff7c646e6f"). InnerVolumeSpecName "kube-api-access-h5g7k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.240262 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2d04606e-e735-4d65-b208-b39f04aa1630-kube-api-access-qzxlx" (OuterVolumeSpecName: "kube-api-access-qzxlx") pod "2d04606e-e735-4d65-b208-b39f04aa1630" (UID: "2d04606e-e735-4d65-b208-b39f04aa1630"). InnerVolumeSpecName "kube-api-access-qzxlx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.267901 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/eff110e4-7a33-4782-86e1-efff7c646e6f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "eff110e4-7a33-4782-86e1-efff7c646e6f" (UID: "eff110e4-7a33-4782-86e1-efff7c646e6f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.335131 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5g7k\" (UniqueName: \"kubernetes.io/projected/eff110e4-7a33-4782-86e1-efff7c646e6f-kube-api-access-h5g7k\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.335176 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qzxlx\" (UniqueName: \"kubernetes.io/projected/2d04606e-e735-4d65-b208-b39f04aa1630-kube-api-access-qzxlx\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.335190 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/eff110e4-7a33-4782-86e1-efff7c646e6f-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.335201 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/eff110e4-7a33-4782-86e1-efff7c646e6f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.335229 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2d04606e-e735-4d65-b208-b39f04aa1630-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.352040 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2d04606e-e735-4d65-b208-b39f04aa1630-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2d04606e-e735-4d65-b208-b39f04aa1630" (UID: "2d04606e-e735-4d65-b208-b39f04aa1630"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.437339 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2d04606e-e735-4d65-b208-b39f04aa1630-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.791527 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qcqfz" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.791468 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qcqfz" event={"ID":"eff110e4-7a33-4782-86e1-efff7c646e6f","Type":"ContainerDied","Data":"cbc0b597a869547186149a1268e53b1ab0d5d5099d47e1a8573bd60a43388fa6"} Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.792516 4767 scope.go:117] "RemoveContainer" containerID="78c683f03e61ce4c8ade51936d01fcc138f7aaf94c69a75adb014d646d82705e" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.800880 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ns2sx" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.802336 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" path="/var/lib/kubelet/pods/8d36dd2c-a6e9-4369-aae5-c657695233a5/volumes" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.803317 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" path="/var/lib/kubelet/pods/b57f864b-8ab2-499a-a47e-b4a4c62842e7/volumes" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.804005 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" path="/var/lib/kubelet/pods/daa183e4-f49d-4f7d-9f9b-66e42f869297/volumes" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.806528 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.806587 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ns2sx" event={"ID":"2d04606e-e735-4d65-b208-b39f04aa1630","Type":"ContainerDied","Data":"021c7926debadbf12ba87aa1ac151db9b609884bfbde068ee59abf0d034e71bb"} Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.810235 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-6nztk" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.815035 4767 scope.go:117] "RemoveContainer" containerID="644a56fa9825c6197d0fce32b243da8c744df96f93f670f6361ed5b660e19177" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.864394 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qcqfz"] Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.866499 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qcqfz"] Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.866629 4767 scope.go:117] "RemoveContainer" containerID="dc68665c12e4570b764a6f86ba83232a4c75b921eafc2d07525903d485eed430" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.882115 4767 scope.go:117] "RemoveContainer" 
containerID="eb0d1fcb7ddc7bad27ea096801c2acc3a5fccc1bcb1f584bed5928929edd6d3d" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.903263 4767 scope.go:117] "RemoveContainer" containerID="b62777e57f4e819b3e489b8744ede348486bcb0bb56c497a3e1e7a9acf7cbe3d" Jan 28 18:36:50 crc kubenswrapper[4767]: I0128 18:36:50.925830 4767 scope.go:117] "RemoveContainer" containerID="13dab8432247edf469f61177543224f8e3cf4f9e07e80586ffa37bb0a9cfaac3" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.458178 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-qf5wb"] Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.458676 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" containerName="extract-content" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.458717 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" containerName="extract-content" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.458774 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" containerName="registry-server" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.458794 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" containerName="registry-server" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.458810 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerName="marketplace-operator" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.458825 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerName="marketplace-operator" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.458843 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eff110e4-7a33-4782-86e1-efff7c646e6f" containerName="extract-content" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.458856 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="eff110e4-7a33-4782-86e1-efff7c646e6f" containerName="extract-content" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.458876 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" containerName="extract-content" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.458892 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" containerName="extract-content" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.458915 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eff110e4-7a33-4782-86e1-efff7c646e6f" containerName="extract-utilities" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.458930 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="eff110e4-7a33-4782-86e1-efff7c646e6f" containerName="extract-utilities" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.458944 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" containerName="extract-utilities" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.458958 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" containerName="extract-utilities" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.458983 4767 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="eff110e4-7a33-4782-86e1-efff7c646e6f" containerName="registry-server" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.458999 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="eff110e4-7a33-4782-86e1-efff7c646e6f" containerName="registry-server" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.459022 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" containerName="extract-utilities" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459037 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" containerName="extract-utilities" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.459055 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" containerName="registry-server" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459069 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" containerName="registry-server" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.459089 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" containerName="extract-utilities" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459104 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" containerName="extract-utilities" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.459125 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerName="marketplace-operator" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459138 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerName="marketplace-operator" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.459153 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" containerName="extract-content" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459166 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" containerName="extract-content" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.459187 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" containerName="registry-server" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459201 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" containerName="registry-server" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459440 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8d36dd2c-a6e9-4369-aae5-c657695233a5" containerName="registry-server" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459467 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerName="marketplace-operator" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459489 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerName="marketplace-operator" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459508 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="daa183e4-f49d-4f7d-9f9b-66e42f869297" containerName="registry-server" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459534 
4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerName="marketplace-operator" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459562 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" containerName="registry-server" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459593 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="eff110e4-7a33-4782-86e1-efff7c646e6f" containerName="registry-server" Jan 28 18:36:51 crc kubenswrapper[4767]: E0128 18:36:51.459810 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerName="marketplace-operator" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.459828 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b57f864b-8ab2-499a-a47e-b4a4c62842e7" containerName="marketplace-operator" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.467873 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.473437 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.478705 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qf5wb"] Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.577910 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btl9k\" (UniqueName: \"kubernetes.io/projected/0f72d947-439a-4097-bdd3-f695dc72ae90-kube-api-access-btl9k\") pod \"certified-operators-qf5wb\" (UID: \"0f72d947-439a-4097-bdd3-f695dc72ae90\") " pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.577965 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f72d947-439a-4097-bdd3-f695dc72ae90-catalog-content\") pod \"certified-operators-qf5wb\" (UID: \"0f72d947-439a-4097-bdd3-f695dc72ae90\") " pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.578119 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f72d947-439a-4097-bdd3-f695dc72ae90-utilities\") pod \"certified-operators-qf5wb\" (UID: \"0f72d947-439a-4097-bdd3-f695dc72ae90\") " pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.679726 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f72d947-439a-4097-bdd3-f695dc72ae90-utilities\") pod \"certified-operators-qf5wb\" (UID: \"0f72d947-439a-4097-bdd3-f695dc72ae90\") " pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.679827 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btl9k\" (UniqueName: \"kubernetes.io/projected/0f72d947-439a-4097-bdd3-f695dc72ae90-kube-api-access-btl9k\") pod \"certified-operators-qf5wb\" (UID: \"0f72d947-439a-4097-bdd3-f695dc72ae90\") " 
pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.679868 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f72d947-439a-4097-bdd3-f695dc72ae90-catalog-content\") pod \"certified-operators-qf5wb\" (UID: \"0f72d947-439a-4097-bdd3-f695dc72ae90\") " pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.680435 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0f72d947-439a-4097-bdd3-f695dc72ae90-utilities\") pod \"certified-operators-qf5wb\" (UID: \"0f72d947-439a-4097-bdd3-f695dc72ae90\") " pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.682120 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0f72d947-439a-4097-bdd3-f695dc72ae90-catalog-content\") pod \"certified-operators-qf5wb\" (UID: \"0f72d947-439a-4097-bdd3-f695dc72ae90\") " pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.701487 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btl9k\" (UniqueName: \"kubernetes.io/projected/0f72d947-439a-4097-bdd3-f695dc72ae90-kube-api-access-btl9k\") pod \"certified-operators-qf5wb\" (UID: \"0f72d947-439a-4097-bdd3-f695dc72ae90\") " pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:36:51 crc kubenswrapper[4767]: I0128 18:36:51.794001 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.216034 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-qf5wb"] Jan 28 18:36:52 crc kubenswrapper[4767]: W0128 18:36:52.227891 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f72d947_439a_4097_bdd3_f695dc72ae90.slice/crio-a46db80ab3acdaaaf3964b856c800dd3911de8c9a4ddf08ce6673066194a0242 WatchSource:0}: Error finding container a46db80ab3acdaaaf3964b856c800dd3911de8c9a4ddf08ce6673066194a0242: Status 404 returned error can't find the container with id a46db80ab3acdaaaf3964b856c800dd3911de8c9a4ddf08ce6673066194a0242 Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.804065 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eff110e4-7a33-4782-86e1-efff7c646e6f" path="/var/lib/kubelet/pods/eff110e4-7a33-4782-86e1-efff7c646e6f/volumes" Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.825228 4767 generic.go:334] "Generic (PLEG): container finished" podID="0f72d947-439a-4097-bdd3-f695dc72ae90" containerID="fc347614b9d84d014d73bb1399a297b6f6126ad9e0caf80669d46bd0f34cea13" exitCode=0 Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.825246 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qf5wb" event={"ID":"0f72d947-439a-4097-bdd3-f695dc72ae90","Type":"ContainerDied","Data":"fc347614b9d84d014d73bb1399a297b6f6126ad9e0caf80669d46bd0f34cea13"} Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.825340 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qf5wb" 
event={"ID":"0f72d947-439a-4097-bdd3-f695dc72ae90","Type":"ContainerStarted","Data":"a46db80ab3acdaaaf3964b856c800dd3911de8c9a4ddf08ce6673066194a0242"} Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.827478 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.865759 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-8mdrf"] Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.867132 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.869842 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.870960 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8mdrf"] Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.998385 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sb67m\" (UniqueName: \"kubernetes.io/projected/7644cd39-83ef-4613-ac2e-774f7d8efd0c-kube-api-access-sb67m\") pod \"community-operators-8mdrf\" (UID: \"7644cd39-83ef-4613-ac2e-774f7d8efd0c\") " pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.998497 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7644cd39-83ef-4613-ac2e-774f7d8efd0c-utilities\") pod \"community-operators-8mdrf\" (UID: \"7644cd39-83ef-4613-ac2e-774f7d8efd0c\") " pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:36:52 crc kubenswrapper[4767]: I0128 18:36:52.998538 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7644cd39-83ef-4613-ac2e-774f7d8efd0c-catalog-content\") pod \"community-operators-8mdrf\" (UID: \"7644cd39-83ef-4613-ac2e-774f7d8efd0c\") " pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.100815 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sb67m\" (UniqueName: \"kubernetes.io/projected/7644cd39-83ef-4613-ac2e-774f7d8efd0c-kube-api-access-sb67m\") pod \"community-operators-8mdrf\" (UID: \"7644cd39-83ef-4613-ac2e-774f7d8efd0c\") " pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.101440 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7644cd39-83ef-4613-ac2e-774f7d8efd0c-utilities\") pod \"community-operators-8mdrf\" (UID: \"7644cd39-83ef-4613-ac2e-774f7d8efd0c\") " pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.101473 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7644cd39-83ef-4613-ac2e-774f7d8efd0c-catalog-content\") pod \"community-operators-8mdrf\" (UID: \"7644cd39-83ef-4613-ac2e-774f7d8efd0c\") " pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.102014 4767 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7644cd39-83ef-4613-ac2e-774f7d8efd0c-utilities\") pod \"community-operators-8mdrf\" (UID: \"7644cd39-83ef-4613-ac2e-774f7d8efd0c\") " pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.102108 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7644cd39-83ef-4613-ac2e-774f7d8efd0c-catalog-content\") pod \"community-operators-8mdrf\" (UID: \"7644cd39-83ef-4613-ac2e-774f7d8efd0c\") " pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.125790 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sb67m\" (UniqueName: \"kubernetes.io/projected/7644cd39-83ef-4613-ac2e-774f7d8efd0c-kube-api-access-sb67m\") pod \"community-operators-8mdrf\" (UID: \"7644cd39-83ef-4613-ac2e-774f7d8efd0c\") " pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.195026 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.624573 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-8mdrf"] Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.834486 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8mdrf" event={"ID":"7644cd39-83ef-4613-ac2e-774f7d8efd0c","Type":"ContainerStarted","Data":"f4f1a426c3ba2abe2517e1df312883f313460b301404edfaef27fc6d981de8e5"} Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.860454 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kwvlm"] Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.862074 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.865093 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 28 18:36:53 crc kubenswrapper[4767]: I0128 18:36:53.871159 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kwvlm"] Jan 28 18:36:54 crc kubenswrapper[4767]: I0128 18:36:54.015697 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d53c5b1a-7ab8-45e2-8a14-4698bb28b94e-catalog-content\") pod \"redhat-marketplace-kwvlm\" (UID: \"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e\") " pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:36:54 crc kubenswrapper[4767]: I0128 18:36:54.016097 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d53c5b1a-7ab8-45e2-8a14-4698bb28b94e-utilities\") pod \"redhat-marketplace-kwvlm\" (UID: \"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e\") " pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:36:54 crc kubenswrapper[4767]: I0128 18:36:54.016495 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-87r5v\" (UniqueName: \"kubernetes.io/projected/d53c5b1a-7ab8-45e2-8a14-4698bb28b94e-kube-api-access-87r5v\") pod \"redhat-marketplace-kwvlm\" (UID: \"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e\") " pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:36:54 crc kubenswrapper[4767]: I0128 18:36:54.117713 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d53c5b1a-7ab8-45e2-8a14-4698bb28b94e-utilities\") pod \"redhat-marketplace-kwvlm\" (UID: \"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e\") " pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:36:54 crc kubenswrapper[4767]: I0128 18:36:54.117770 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-87r5v\" (UniqueName: \"kubernetes.io/projected/d53c5b1a-7ab8-45e2-8a14-4698bb28b94e-kube-api-access-87r5v\") pod \"redhat-marketplace-kwvlm\" (UID: \"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e\") " pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:36:54 crc kubenswrapper[4767]: I0128 18:36:54.117811 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d53c5b1a-7ab8-45e2-8a14-4698bb28b94e-catalog-content\") pod \"redhat-marketplace-kwvlm\" (UID: \"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e\") " pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:36:54 crc kubenswrapper[4767]: I0128 18:36:54.118417 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d53c5b1a-7ab8-45e2-8a14-4698bb28b94e-catalog-content\") pod \"redhat-marketplace-kwvlm\" (UID: \"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e\") " pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:36:54 crc kubenswrapper[4767]: I0128 18:36:54.118529 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d53c5b1a-7ab8-45e2-8a14-4698bb28b94e-utilities\") pod \"redhat-marketplace-kwvlm\" (UID: 
\"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e\") " pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:36:54 crc kubenswrapper[4767]: I0128 18:36:54.157974 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-87r5v\" (UniqueName: \"kubernetes.io/projected/d53c5b1a-7ab8-45e2-8a14-4698bb28b94e-kube-api-access-87r5v\") pod \"redhat-marketplace-kwvlm\" (UID: \"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e\") " pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:36:54 crc kubenswrapper[4767]: I0128 18:36:54.250599 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:36:56 crc kubenswrapper[4767]: I0128 18:36:54.842766 4767 generic.go:334] "Generic (PLEG): container finished" podID="7644cd39-83ef-4613-ac2e-774f7d8efd0c" containerID="d1a04f75886f947fbbf6325c47739adf0e12c2229b86aadc72eef2f124ade5bc" exitCode=0 Jan 28 18:36:56 crc kubenswrapper[4767]: I0128 18:36:54.843300 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8mdrf" event={"ID":"7644cd39-83ef-4613-ac2e-774f7d8efd0c","Type":"ContainerDied","Data":"d1a04f75886f947fbbf6325c47739adf0e12c2229b86aadc72eef2f124ade5bc"} Jan 28 18:36:56 crc kubenswrapper[4767]: I0128 18:36:54.847232 4767 generic.go:334] "Generic (PLEG): container finished" podID="0f72d947-439a-4097-bdd3-f695dc72ae90" containerID="bf905dbf32c6eb5968c42f1ec3c64afb297388b2eee86f240b14e0549d16bf24" exitCode=0 Jan 28 18:36:56 crc kubenswrapper[4767]: I0128 18:36:54.847266 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qf5wb" event={"ID":"0f72d947-439a-4097-bdd3-f695dc72ae90","Type":"ContainerDied","Data":"bf905dbf32c6eb5968c42f1ec3c64afb297388b2eee86f240b14e0549d16bf24"} Jan 28 18:36:56 crc kubenswrapper[4767]: I0128 18:36:56.866149 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-qf5wb" event={"ID":"0f72d947-439a-4097-bdd3-f695dc72ae90","Type":"ContainerStarted","Data":"07a980e98c4869566cf84293f817030e7706cb33b6da26745d72d8eb28519390"} Jan 28 18:36:56 crc kubenswrapper[4767]: I0128 18:36:56.888695 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-qf5wb" podStartSLOduration=2.180878107 podStartE2EDuration="5.888666353s" podCreationTimestamp="2026-01-28 18:36:51 +0000 UTC" firstStartedPulling="2026-01-28 18:36:52.827065654 +0000 UTC m=+418.791248538" lastFinishedPulling="2026-01-28 18:36:56.53485391 +0000 UTC m=+422.499036784" observedRunningTime="2026-01-28 18:36:56.885480334 +0000 UTC m=+422.849663218" watchObservedRunningTime="2026-01-28 18:36:56.888666353 +0000 UTC m=+422.852849227" Jan 28 18:36:57 crc kubenswrapper[4767]: I0128 18:36:57.085801 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kwvlm"] Jan 28 18:36:57 crc kubenswrapper[4767]: I0128 18:36:57.782198 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4"] Jan 28 18:36:57 crc kubenswrapper[4767]: I0128 18:36:57.782741 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" podUID="562357a2-03ca-406d-88e5-beb8f2a07d6f" containerName="controller-manager" containerID="cri-o://a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262" gracePeriod=30 Jan 
28 18:36:57 crc kubenswrapper[4767]: I0128 18:36:57.871314 4767 generic.go:334] "Generic (PLEG): container finished" podID="d53c5b1a-7ab8-45e2-8a14-4698bb28b94e" containerID="68f17c5de413f70b6541635b9be4d89bb39af267fe8f8ee5fb3249570501d594" exitCode=0 Jan 28 18:36:57 crc kubenswrapper[4767]: I0128 18:36:57.871374 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kwvlm" event={"ID":"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e","Type":"ContainerDied","Data":"68f17c5de413f70b6541635b9be4d89bb39af267fe8f8ee5fb3249570501d594"} Jan 28 18:36:57 crc kubenswrapper[4767]: I0128 18:36:57.871397 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kwvlm" event={"ID":"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e","Type":"ContainerStarted","Data":"ea0b0420866c719237b9b20287de9ea33231f42001c2705707849c2e7d1d1615"} Jan 28 18:36:57 crc kubenswrapper[4767]: I0128 18:36:57.873991 4767 generic.go:334] "Generic (PLEG): container finished" podID="7644cd39-83ef-4613-ac2e-774f7d8efd0c" containerID="13d526c9e28b60fb421bc682f2c785d987696f41fb1b1482c80fd1ceef50b4d3" exitCode=0 Jan 28 18:36:57 crc kubenswrapper[4767]: I0128 18:36:57.874914 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8mdrf" event={"ID":"7644cd39-83ef-4613-ac2e-774f7d8efd0c","Type":"ContainerDied","Data":"13d526c9e28b60fb421bc682f2c785d987696f41fb1b1482c80fd1ceef50b4d3"} Jan 28 18:36:57 crc kubenswrapper[4767]: I0128 18:36:57.884078 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb"] Jan 28 18:36:57 crc kubenswrapper[4767]: I0128 18:36:57.884365 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" podUID="ded3d79e-37f8-4d76-9d0c-8590c1c6668d" containerName="route-controller-manager" containerID="cri-o://50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b" gracePeriod=30 Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.507330 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.694514 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krlnr\" (UniqueName: \"kubernetes.io/projected/562357a2-03ca-406d-88e5-beb8f2a07d6f-kube-api-access-krlnr\") pod \"562357a2-03ca-406d-88e5-beb8f2a07d6f\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.694584 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-client-ca\") pod \"562357a2-03ca-406d-88e5-beb8f2a07d6f\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.694689 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-config\") pod \"562357a2-03ca-406d-88e5-beb8f2a07d6f\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.694722 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-proxy-ca-bundles\") pod \"562357a2-03ca-406d-88e5-beb8f2a07d6f\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.694752 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/562357a2-03ca-406d-88e5-beb8f2a07d6f-serving-cert\") pod \"562357a2-03ca-406d-88e5-beb8f2a07d6f\" (UID: \"562357a2-03ca-406d-88e5-beb8f2a07d6f\") " Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.695445 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-client-ca" (OuterVolumeSpecName: "client-ca") pod "562357a2-03ca-406d-88e5-beb8f2a07d6f" (UID: "562357a2-03ca-406d-88e5-beb8f2a07d6f"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.695844 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-config" (OuterVolumeSpecName: "config") pod "562357a2-03ca-406d-88e5-beb8f2a07d6f" (UID: "562357a2-03ca-406d-88e5-beb8f2a07d6f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.696335 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "562357a2-03ca-406d-88e5-beb8f2a07d6f" (UID: "562357a2-03ca-406d-88e5-beb8f2a07d6f"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.703058 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/562357a2-03ca-406d-88e5-beb8f2a07d6f-kube-api-access-krlnr" (OuterVolumeSpecName: "kube-api-access-krlnr") pod "562357a2-03ca-406d-88e5-beb8f2a07d6f" (UID: "562357a2-03ca-406d-88e5-beb8f2a07d6f"). InnerVolumeSpecName "kube-api-access-krlnr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.704845 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/562357a2-03ca-406d-88e5-beb8f2a07d6f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "562357a2-03ca-406d-88e5-beb8f2a07d6f" (UID: "562357a2-03ca-406d-88e5-beb8f2a07d6f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.797584 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.797668 4767 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.797722 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/562357a2-03ca-406d-88e5-beb8f2a07d6f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.797745 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krlnr\" (UniqueName: \"kubernetes.io/projected/562357a2-03ca-406d-88e5-beb8f2a07d6f-kube-api-access-krlnr\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.797794 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/562357a2-03ca-406d-88e5-beb8f2a07d6f-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.801932 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.882518 4767 generic.go:334] "Generic (PLEG): container finished" podID="ded3d79e-37f8-4d76-9d0c-8590c1c6668d" containerID="50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b" exitCode=0 Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.882573 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" event={"ID":"ded3d79e-37f8-4d76-9d0c-8590c1c6668d","Type":"ContainerDied","Data":"50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b"} Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.882639 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" event={"ID":"ded3d79e-37f8-4d76-9d0c-8590c1c6668d","Type":"ContainerDied","Data":"ec8c368b523d62d8a65c2b11e4a73e191b085fe809d19ce5620d0c1a56866f2d"} Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.882635 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.882665 4767 scope.go:117] "RemoveContainer" containerID="50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.884583 4767 generic.go:334] "Generic (PLEG): container finished" podID="562357a2-03ca-406d-88e5-beb8f2a07d6f" containerID="a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262" exitCode=0 Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.884636 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.884638 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" event={"ID":"562357a2-03ca-406d-88e5-beb8f2a07d6f","Type":"ContainerDied","Data":"a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262"} Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.884994 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4" event={"ID":"562357a2-03ca-406d-88e5-beb8f2a07d6f","Type":"ContainerDied","Data":"883f702b983eec63b2c3f82d53f877887b5f1ba1bb959d5174572b75057e20bf"} Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.926700 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4"] Jan 28 18:36:58 crc kubenswrapper[4767]: I0128 18:36:58.931364 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-78d4c9cd75-b6nt4"] Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.000687 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nwn22\" (UniqueName: \"kubernetes.io/projected/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-kube-api-access-nwn22\") pod \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.000888 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-serving-cert\") pod \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.000936 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-client-ca\") pod \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.001076 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-config\") pod \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\" (UID: \"ded3d79e-37f8-4d76-9d0c-8590c1c6668d\") " Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.002057 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-client-ca" (OuterVolumeSpecName: "client-ca") pod "ded3d79e-37f8-4d76-9d0c-8590c1c6668d" (UID: "ded3d79e-37f8-4d76-9d0c-8590c1c6668d"). 
InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.002194 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-config" (OuterVolumeSpecName: "config") pod "ded3d79e-37f8-4d76-9d0c-8590c1c6668d" (UID: "ded3d79e-37f8-4d76-9d0c-8590c1c6668d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.004027 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "ded3d79e-37f8-4d76-9d0c-8590c1c6668d" (UID: "ded3d79e-37f8-4d76-9d0c-8590c1c6668d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.004409 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-kube-api-access-nwn22" (OuterVolumeSpecName: "kube-api-access-nwn22") pod "ded3d79e-37f8-4d76-9d0c-8590c1c6668d" (UID: "ded3d79e-37f8-4d76-9d0c-8590c1c6668d"). InnerVolumeSpecName "kube-api-access-nwn22". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.021157 4767 scope.go:117] "RemoveContainer" containerID="50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b" Jan 28 18:36:59 crc kubenswrapper[4767]: E0128 18:36:59.021723 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b\": container with ID starting with 50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b not found: ID does not exist" containerID="50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.021799 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b"} err="failed to get container status \"50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b\": rpc error: code = NotFound desc = could not find container \"50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b\": container with ID starting with 50b4dcea0208df51623d7d9d909c729e6c9e63a5230a95ba059f6421e6ff207b not found: ID does not exist" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.021841 4767 scope.go:117] "RemoveContainer" containerID="a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.041306 4767 scope.go:117] "RemoveContainer" containerID="a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262" Jan 28 18:36:59 crc kubenswrapper[4767]: E0128 18:36:59.042391 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262\": container with ID starting with a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262 not found: ID does not exist" containerID="a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.042426 4767 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262"} err="failed to get container status \"a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262\": rpc error: code = NotFound desc = could not find container \"a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262\": container with ID starting with a3693cd4020a5df9d1b3147f08ab332449402deac1b19a3208cd3c0ebf565262 not found: ID does not exist" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.102523 4767 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.102569 4767 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-client-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.102581 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.102595 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nwn22\" (UniqueName: \"kubernetes.io/projected/ded3d79e-37f8-4d76-9d0c-8590c1c6668d-kube-api-access-nwn22\") on node \"crc\" DevicePath \"\"" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.210173 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb"] Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.215644 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-54fd9848c7-pd5mb"] Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.542821 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-b64f57cb-n58sc"] Jan 28 18:36:59 crc kubenswrapper[4767]: E0128 18:36:59.543876 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="562357a2-03ca-406d-88e5-beb8f2a07d6f" containerName="controller-manager" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.543902 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="562357a2-03ca-406d-88e5-beb8f2a07d6f" containerName="controller-manager" Jan 28 18:36:59 crc kubenswrapper[4767]: E0128 18:36:59.543935 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ded3d79e-37f8-4d76-9d0c-8590c1c6668d" containerName="route-controller-manager" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.543946 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="ded3d79e-37f8-4d76-9d0c-8590c1c6668d" containerName="route-controller-manager" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.544103 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="ded3d79e-37f8-4d76-9d0c-8590c1c6668d" containerName="route-controller-manager" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.544129 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="562357a2-03ca-406d-88e5-beb8f2a07d6f" containerName="controller-manager" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.544880 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.548068 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.548820 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.548820 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.548961 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.550956 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.551455 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.551632 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj"] Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.552456 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.554795 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.555564 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.555678 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.555857 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.555974 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.556083 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.559268 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.563231 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj"] Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.577142 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-b64f57cb-n58sc"] Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.709145 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1a53747b-0ffa-4e61-8638-3b508a7c6f84-config\") pod \"route-controller-manager-6cf4564844-jr9cj\" (UID: \"1a53747b-0ffa-4e61-8638-3b508a7c6f84\") " pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.709231 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-serving-cert\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.709250 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-proxy-ca-bundles\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.709279 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8m6d4\" (UniqueName: \"kubernetes.io/projected/1a53747b-0ffa-4e61-8638-3b508a7c6f84-kube-api-access-8m6d4\") pod \"route-controller-manager-6cf4564844-jr9cj\" (UID: \"1a53747b-0ffa-4e61-8638-3b508a7c6f84\") " pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.709302 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1a53747b-0ffa-4e61-8638-3b508a7c6f84-client-ca\") pod \"route-controller-manager-6cf4564844-jr9cj\" (UID: \"1a53747b-0ffa-4e61-8638-3b508a7c6f84\") " pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.709323 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-client-ca\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.709341 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-config\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.709358 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgglc\" (UniqueName: \"kubernetes.io/projected/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-kube-api-access-wgglc\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.709378 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/1a53747b-0ffa-4e61-8638-3b508a7c6f84-serving-cert\") pod \"route-controller-manager-6cf4564844-jr9cj\" (UID: \"1a53747b-0ffa-4e61-8638-3b508a7c6f84\") " pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.810166 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8m6d4\" (UniqueName: \"kubernetes.io/projected/1a53747b-0ffa-4e61-8638-3b508a7c6f84-kube-api-access-8m6d4\") pod \"route-controller-manager-6cf4564844-jr9cj\" (UID: \"1a53747b-0ffa-4e61-8638-3b508a7c6f84\") " pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.810232 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1a53747b-0ffa-4e61-8638-3b508a7c6f84-client-ca\") pod \"route-controller-manager-6cf4564844-jr9cj\" (UID: \"1a53747b-0ffa-4e61-8638-3b508a7c6f84\") " pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.810264 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-client-ca\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.810282 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgglc\" (UniqueName: \"kubernetes.io/projected/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-kube-api-access-wgglc\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.810298 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-config\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.810330 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a53747b-0ffa-4e61-8638-3b508a7c6f84-serving-cert\") pod \"route-controller-manager-6cf4564844-jr9cj\" (UID: \"1a53747b-0ffa-4e61-8638-3b508a7c6f84\") " pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.810368 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a53747b-0ffa-4e61-8638-3b508a7c6f84-config\") pod \"route-controller-manager-6cf4564844-jr9cj\" (UID: \"1a53747b-0ffa-4e61-8638-3b508a7c6f84\") " pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.810409 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-serving-cert\") pod \"controller-manager-b64f57cb-n58sc\" (UID: 
\"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.810431 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-proxy-ca-bundles\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.812091 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-client-ca\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.812507 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-proxy-ca-bundles\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.812910 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/1a53747b-0ffa-4e61-8638-3b508a7c6f84-client-ca\") pod \"route-controller-manager-6cf4564844-jr9cj\" (UID: \"1a53747b-0ffa-4e61-8638-3b508a7c6f84\") " pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.812938 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a53747b-0ffa-4e61-8638-3b508a7c6f84-config\") pod \"route-controller-manager-6cf4564844-jr9cj\" (UID: \"1a53747b-0ffa-4e61-8638-3b508a7c6f84\") " pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.813145 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-config\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.818793 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1a53747b-0ffa-4e61-8638-3b508a7c6f84-serving-cert\") pod \"route-controller-manager-6cf4564844-jr9cj\" (UID: \"1a53747b-0ffa-4e61-8638-3b508a7c6f84\") " pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.826495 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-serving-cert\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.829328 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-wgglc\" (UniqueName: \"kubernetes.io/projected/fe0c7018-1a32-4775-8460-3d5c1d8edc0d-kube-api-access-wgglc\") pod \"controller-manager-b64f57cb-n58sc\" (UID: \"fe0c7018-1a32-4775-8460-3d5c1d8edc0d\") " pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.829854 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8m6d4\" (UniqueName: \"kubernetes.io/projected/1a53747b-0ffa-4e61-8638-3b508a7c6f84-kube-api-access-8m6d4\") pod \"route-controller-manager-6cf4564844-jr9cj\" (UID: \"1a53747b-0ffa-4e61-8638-3b508a7c6f84\") " pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.895621 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-8mdrf" event={"ID":"7644cd39-83ef-4613-ac2e-774f7d8efd0c","Type":"ContainerStarted","Data":"de0a2da351464fc32ee646d7432312709adb47f20be91c11cfcf8da0ea3d211c"} Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.899752 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kwvlm" event={"ID":"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e","Type":"ContainerStarted","Data":"ba2631fdee2476ac01da496c7479cfc076a51a09eedd35548d015b0ffd9d2e0b"} Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.902189 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:36:59 crc kubenswrapper[4767]: I0128 18:36:59.913858 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:37:00 crc kubenswrapper[4767]: I0128 18:37:00.333681 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-8mdrf" podStartSLOduration=4.156360826 podStartE2EDuration="8.333659266s" podCreationTimestamp="2026-01-28 18:36:52 +0000 UTC" firstStartedPulling="2026-01-28 18:36:54.844642825 +0000 UTC m=+420.808825699" lastFinishedPulling="2026-01-28 18:36:59.021941265 +0000 UTC m=+424.986124139" observedRunningTime="2026-01-28 18:36:59.932197804 +0000 UTC m=+425.896380688" watchObservedRunningTime="2026-01-28 18:37:00.333659266 +0000 UTC m=+426.297842130" Jan 28 18:37:00 crc kubenswrapper[4767]: I0128 18:37:00.334337 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-b64f57cb-n58sc"] Jan 28 18:37:00 crc kubenswrapper[4767]: I0128 18:37:00.400239 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj"] Jan 28 18:37:00 crc kubenswrapper[4767]: I0128 18:37:00.802371 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="562357a2-03ca-406d-88e5-beb8f2a07d6f" path="/var/lib/kubelet/pods/562357a2-03ca-406d-88e5-beb8f2a07d6f/volumes" Jan 28 18:37:00 crc kubenswrapper[4767]: I0128 18:37:00.804075 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ded3d79e-37f8-4d76-9d0c-8590c1c6668d" path="/var/lib/kubelet/pods/ded3d79e-37f8-4d76-9d0c-8590c1c6668d/volumes" Jan 28 18:37:00 crc kubenswrapper[4767]: I0128 18:37:00.915257 4767 generic.go:334] "Generic (PLEG): container finished" podID="d53c5b1a-7ab8-45e2-8a14-4698bb28b94e" 
containerID="ba2631fdee2476ac01da496c7479cfc076a51a09eedd35548d015b0ffd9d2e0b" exitCode=0 Jan 28 18:37:00 crc kubenswrapper[4767]: I0128 18:37:00.915328 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kwvlm" event={"ID":"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e","Type":"ContainerDied","Data":"ba2631fdee2476ac01da496c7479cfc076a51a09eedd35548d015b0ffd9d2e0b"} Jan 28 18:37:00 crc kubenswrapper[4767]: I0128 18:37:00.916518 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" event={"ID":"1a53747b-0ffa-4e61-8638-3b508a7c6f84","Type":"ContainerStarted","Data":"7dc9d702410ca7ab60a8eb45cc31db63a53d2618bddddf8fe88d32292733c532"} Jan 28 18:37:00 crc kubenswrapper[4767]: I0128 18:37:00.918280 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" event={"ID":"fe0c7018-1a32-4775-8460-3d5c1d8edc0d","Type":"ContainerStarted","Data":"75dd176d5fd6816aae69300ae8e772439976ff19198a1aa52816d69dc857b2ca"} Jan 28 18:37:01 crc kubenswrapper[4767]: I0128 18:37:01.794769 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:37:01 crc kubenswrapper[4767]: I0128 18:37:01.795138 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:37:01 crc kubenswrapper[4767]: I0128 18:37:01.839581 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:37:01 crc kubenswrapper[4767]: I0128 18:37:01.926708 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" event={"ID":"1a53747b-0ffa-4e61-8638-3b508a7c6f84","Type":"ContainerStarted","Data":"e200e05648e2858f6ab428db696ddf646d0f036f4706c52d1dc590419c913a57"} Jan 28 18:37:01 crc kubenswrapper[4767]: I0128 18:37:01.926982 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:37:01 crc kubenswrapper[4767]: I0128 18:37:01.928370 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" event={"ID":"fe0c7018-1a32-4775-8460-3d5c1d8edc0d","Type":"ContainerStarted","Data":"22a85d236cace95f40a12201e2aa1a1241414e41d6fb93788d194d7ac0054f67"} Jan 28 18:37:01 crc kubenswrapper[4767]: I0128 18:37:01.929074 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:37:01 crc kubenswrapper[4767]: I0128 18:37:01.936714 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" Jan 28 18:37:01 crc kubenswrapper[4767]: I0128 18:37:01.937235 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" Jan 28 18:37:01 crc kubenswrapper[4767]: I0128 18:37:01.968407 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6cf4564844-jr9cj" podStartSLOduration=4.96837833 podStartE2EDuration="4.96837833s" podCreationTimestamp="2026-01-28 18:36:57 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:37:01.949569668 +0000 UTC m=+427.913752562" watchObservedRunningTime="2026-01-28 18:37:01.96837833 +0000 UTC m=+427.932561204" Jan 28 18:37:01 crc kubenswrapper[4767]: I0128 18:37:01.979935 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-qf5wb" Jan 28 18:37:02 crc kubenswrapper[4767]: I0128 18:37:02.005245 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-b64f57cb-n58sc" podStartSLOduration=5.005209656 podStartE2EDuration="5.005209656s" podCreationTimestamp="2026-01-28 18:36:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:37:02.003024239 +0000 UTC m=+427.967207113" watchObservedRunningTime="2026-01-28 18:37:02.005209656 +0000 UTC m=+427.969392530" Jan 28 18:37:03 crc kubenswrapper[4767]: I0128 18:37:03.195571 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:37:03 crc kubenswrapper[4767]: I0128 18:37:03.195972 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:37:03 crc kubenswrapper[4767]: I0128 18:37:03.242375 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:37:04 crc kubenswrapper[4767]: I0128 18:37:04.947694 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kwvlm" event={"ID":"d53c5b1a-7ab8-45e2-8a14-4698bb28b94e","Type":"ContainerStarted","Data":"8056a5e260573c73d1f31b6d8da754037578a67db193605c0434195efd4edaca"} Jan 28 18:37:04 crc kubenswrapper[4767]: I0128 18:37:04.997927 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-8mdrf" Jan 28 18:37:05 crc kubenswrapper[4767]: I0128 18:37:05.019069 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kwvlm" podStartSLOduration=5.853773073 podStartE2EDuration="12.019051761s" podCreationTimestamp="2026-01-28 18:36:53 +0000 UTC" firstStartedPulling="2026-01-28 18:36:57.874545406 +0000 UTC m=+423.838728280" lastFinishedPulling="2026-01-28 18:37:04.039824094 +0000 UTC m=+430.004006968" observedRunningTime="2026-01-28 18:37:04.966892411 +0000 UTC m=+430.931075285" watchObservedRunningTime="2026-01-28 18:37:05.019051761 +0000 UTC m=+430.983234635" Jan 28 18:37:14 crc kubenswrapper[4767]: I0128 18:37:14.250754 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:37:14 crc kubenswrapper[4767]: I0128 18:37:14.251128 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:37:14 crc kubenswrapper[4767]: I0128 18:37:14.290915 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:37:15 crc kubenswrapper[4767]: I0128 18:37:15.044260 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kwvlm" Jan 28 18:37:15 crc 
kubenswrapper[4767]: I0128 18:37:15.455369 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:37:15 crc kubenswrapper[4767]: I0128 18:37:15.455438 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:37:15 crc kubenswrapper[4767]: I0128 18:37:15.455533 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:37:15 crc kubenswrapper[4767]: I0128 18:37:15.456186 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"34c635ea69766969f7f4b7e505bfdf39a502ec67ed786aa17cc6f9134cc202ab"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 18:37:15 crc kubenswrapper[4767]: I0128 18:37:15.456258 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://34c635ea69766969f7f4b7e505bfdf39a502ec67ed786aa17cc6f9134cc202ab" gracePeriod=600 Jan 28 18:37:17 crc kubenswrapper[4767]: I0128 18:37:17.017077 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="34c635ea69766969f7f4b7e505bfdf39a502ec67ed786aa17cc6f9134cc202ab" exitCode=0 Jan 28 18:37:17 crc kubenswrapper[4767]: I0128 18:37:17.017186 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"34c635ea69766969f7f4b7e505bfdf39a502ec67ed786aa17cc6f9134cc202ab"} Jan 28 18:37:17 crc kubenswrapper[4767]: I0128 18:37:17.017670 4767 scope.go:117] "RemoveContainer" containerID="11a5dd49ddf60753c049c2fb852d720047db2f9ee2eb8455819e6d28b0400753" Jan 28 18:37:18 crc kubenswrapper[4767]: I0128 18:37:18.023912 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"d926a7f2dcbb421ddc0b4cecd49fbe2ef40e7e877b6d38a43f2f529c00e06b57"} Jan 28 18:37:20 crc kubenswrapper[4767]: I0128 18:37:20.823095 4767 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","burstable","pod2d04606e-e735-4d65-b208-b39f04aa1630"] err="unable to destroy cgroup paths for cgroup [kubepods burstable pod2d04606e-e735-4d65-b208-b39f04aa1630] : Timed out while waiting for systemd to remove kubepods-burstable-pod2d04606e_e735_4d65_b208_b39f04aa1630.slice" Jan 28 18:37:20 crc kubenswrapper[4767]: E0128 18:37:20.823962 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods burstable pod2d04606e-e735-4d65-b208-b39f04aa1630] : unable to destroy cgroup 
paths for cgroup [kubepods burstable pod2d04606e-e735-4d65-b208-b39f04aa1630] : Timed out while waiting for systemd to remove kubepods-burstable-pod2d04606e_e735_4d65_b208_b39f04aa1630.slice" pod="openshift-marketplace/redhat-operators-ns2sx" podUID="2d04606e-e735-4d65-b208-b39f04aa1630"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.059008 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ns2sx"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.111271 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ns2sx"]
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.119045 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ns2sx"]
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.157800 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jfj5m"]
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.158871 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.163153 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.167751 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jfj5m"]
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.341983 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m56f\" (UniqueName: \"kubernetes.io/projected/d0824466-227b-42c2-995d-6ab5dde3f5c2-kube-api-access-6m56f\") pod \"redhat-operators-jfj5m\" (UID: \"d0824466-227b-42c2-995d-6ab5dde3f5c2\") " pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.342144 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0824466-227b-42c2-995d-6ab5dde3f5c2-utilities\") pod \"redhat-operators-jfj5m\" (UID: \"d0824466-227b-42c2-995d-6ab5dde3f5c2\") " pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.342222 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0824466-227b-42c2-995d-6ab5dde3f5c2-catalog-content\") pod \"redhat-operators-jfj5m\" (UID: \"d0824466-227b-42c2-995d-6ab5dde3f5c2\") " pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.443265 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0824466-227b-42c2-995d-6ab5dde3f5c2-utilities\") pod \"redhat-operators-jfj5m\" (UID: \"d0824466-227b-42c2-995d-6ab5dde3f5c2\") " pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.443353 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0824466-227b-42c2-995d-6ab5dde3f5c2-catalog-content\") pod \"redhat-operators-jfj5m\" (UID: \"d0824466-227b-42c2-995d-6ab5dde3f5c2\") " pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.443384 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m56f\" (UniqueName: \"kubernetes.io/projected/d0824466-227b-42c2-995d-6ab5dde3f5c2-kube-api-access-6m56f\") pod \"redhat-operators-jfj5m\" (UID: \"d0824466-227b-42c2-995d-6ab5dde3f5c2\") " pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.444023 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d0824466-227b-42c2-995d-6ab5dde3f5c2-utilities\") pod \"redhat-operators-jfj5m\" (UID: \"d0824466-227b-42c2-995d-6ab5dde3f5c2\") " pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.444132 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d0824466-227b-42c2-995d-6ab5dde3f5c2-catalog-content\") pod \"redhat-operators-jfj5m\" (UID: \"d0824466-227b-42c2-995d-6ab5dde3f5c2\") " pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.467759 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6m56f\" (UniqueName: \"kubernetes.io/projected/d0824466-227b-42c2-995d-6ab5dde3f5c2-kube-api-access-6m56f\") pod \"redhat-operators-jfj5m\" (UID: \"d0824466-227b-42c2-995d-6ab5dde3f5c2\") " pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.479392 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:21 crc kubenswrapper[4767]: I0128 18:37:21.874187 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jfj5m"]
Jan 28 18:37:21 crc kubenswrapper[4767]: W0128 18:37:21.880146 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd0824466_227b_42c2_995d_6ab5dde3f5c2.slice/crio-aef3bdd235670db1f208bb4d7a9cfd5e347cadf95a9136ebaef4499771f60081 WatchSource:0}: Error finding container aef3bdd235670db1f208bb4d7a9cfd5e347cadf95a9136ebaef4499771f60081: Status 404 returned error can't find the container with id aef3bdd235670db1f208bb4d7a9cfd5e347cadf95a9136ebaef4499771f60081
Jan 28 18:37:22 crc kubenswrapper[4767]: I0128 18:37:22.074139 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jfj5m" event={"ID":"d0824466-227b-42c2-995d-6ab5dde3f5c2","Type":"ContainerStarted","Data":"aef3bdd235670db1f208bb4d7a9cfd5e347cadf95a9136ebaef4499771f60081"}
Jan 28 18:37:22 crc kubenswrapper[4767]: I0128 18:37:22.807852 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2d04606e-e735-4d65-b208-b39f04aa1630" path="/var/lib/kubelet/pods/2d04606e-e735-4d65-b208-b39f04aa1630/volumes"
Jan 28 18:37:23 crc kubenswrapper[4767]: I0128 18:37:23.080846 4767 generic.go:334] "Generic (PLEG): container finished" podID="d0824466-227b-42c2-995d-6ab5dde3f5c2" containerID="19882bf9b741c72b2895e83a77a179ee9f753524070ec243ba8cba6e2d70098e" exitCode=0
Jan 28 18:37:23 crc kubenswrapper[4767]: I0128 18:37:23.080907 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jfj5m" event={"ID":"d0824466-227b-42c2-995d-6ab5dde3f5c2","Type":"ContainerDied","Data":"19882bf9b741c72b2895e83a77a179ee9f753524070ec243ba8cba6e2d70098e"}
Jan 28 18:37:24 crc kubenswrapper[4767]: I0128 18:37:24.089844 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jfj5m" event={"ID":"d0824466-227b-42c2-995d-6ab5dde3f5c2","Type":"ContainerStarted","Data":"60bd480e3930e8d8f515f71360aedd13ec37651eec4cfb96213e02fb82b4f95e"}
Jan 28 18:37:25 crc kubenswrapper[4767]: I0128 18:37:25.098975 4767 generic.go:334] "Generic (PLEG): container finished" podID="d0824466-227b-42c2-995d-6ab5dde3f5c2" containerID="60bd480e3930e8d8f515f71360aedd13ec37651eec4cfb96213e02fb82b4f95e" exitCode=0
Jan 28 18:37:25 crc kubenswrapper[4767]: I0128 18:37:25.099022 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jfj5m" event={"ID":"d0824466-227b-42c2-995d-6ab5dde3f5c2","Type":"ContainerDied","Data":"60bd480e3930e8d8f515f71360aedd13ec37651eec4cfb96213e02fb82b4f95e"}
Jan 28 18:37:26 crc kubenswrapper[4767]: I0128 18:37:26.107312 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jfj5m" event={"ID":"d0824466-227b-42c2-995d-6ab5dde3f5c2","Type":"ContainerStarted","Data":"8ecb3fde03b53a5667e54d00d593495c1e671570c174f5cfb9d0c644c97fd3b3"}
Jan 28 18:37:26 crc kubenswrapper[4767]: I0128 18:37:26.126922 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jfj5m" podStartSLOduration=2.522858437 podStartE2EDuration="5.126898732s" podCreationTimestamp="2026-01-28 18:37:21 +0000 UTC" firstStartedPulling="2026-01-28 18:37:23.08431102 +0000 UTC m=+449.048493894" lastFinishedPulling="2026-01-28 18:37:25.688351315 +0000 UTC m=+451.652534189" observedRunningTime="2026-01-28 18:37:26.124012263 +0000 UTC m=+452.088195157" watchObservedRunningTime="2026-01-28 18:37:26.126898732 +0000 UTC m=+452.091081606"
Jan 28 18:37:31 crc kubenswrapper[4767]: I0128 18:37:31.480474 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:31 crc kubenswrapper[4767]: I0128 18:37:31.481002 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:32 crc kubenswrapper[4767]: I0128 18:37:32.530976 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jfj5m" podUID="d0824466-227b-42c2-995d-6ab5dde3f5c2" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:37:32 crc kubenswrapper[4767]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:37:32 crc kubenswrapper[4767]: >
Jan 28 18:37:41 crc kubenswrapper[4767]: I0128 18:37:41.525395 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jfj5m"
Jan 28 18:37:41 crc kubenswrapper[4767]: I0128 18:37:41.570660 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jfj5m"
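The startup-probe failure above, followed by probe="startup" status="started" nine seconds later, is the kubelet probing the catalog pod's gRPC registry endpoint on port 50051 before it is listening; the "timeout: failed to connect service ":50051" within 1s" output matches what grpc_health_probe prints in that state. A minimal sketch of a probe that would behave this way, assuming an exec probe running grpc_health_probe (the actual CatalogSource pod spec is not part of this log, and the period and threshold below are assumptions), written against the k8s.io/api/core/v1 types:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    )

    func main() {
    	// Sketch only: reconstructs the probe implied by the log lines above.
    	startup := corev1.Probe{
    		ProbeHandler: corev1.ProbeHandler{
    			Exec: &corev1.ExecAction{
    				// On failure this prints: timeout: failed to connect service ":50051" within 1s
    				Command: []string{"grpc_health_probe", "-addr=:50051"},
    			},
    		},
    		TimeoutSeconds:   1,  // matches the "within 1s" in the probe output
    		PeriodSeconds:    10, // assumed from the ~10s gap between the 18:37:32 failure and 18:37:41 success
    		FailureThreshold: 15, // assumed; startup probes typically allow many retries
    	}
    	fmt.Printf("%+v\n", startup)
    }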
Jan 28 18:39:45 crc kubenswrapper[4767]: I0128 18:39:45.455802 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 18:39:45 crc kubenswrapper[4767]: I0128 18:39:45.457244 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 18:40:15 crc kubenswrapper[4767]: I0128 18:40:15.455463 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 18:40:15 crc kubenswrapper[4767]: I0128 18:40:15.456144 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 18:40:45 crc kubenswrapper[4767]: I0128 18:40:45.455017 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 18:40:45 crc kubenswrapper[4767]: I0128 18:40:45.455623 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 18:40:45 crc kubenswrapper[4767]: I0128 18:40:45.455674 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp"
Jan 28 18:40:45 crc kubenswrapper[4767]: I0128 18:40:45.456305 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d926a7f2dcbb421ddc0b4cecd49fbe2ef40e7e877b6d38a43f2f529c00e06b57"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 18:40:45 crc kubenswrapper[4767]: I0128 18:40:45.456364 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://d926a7f2dcbb421ddc0b4cecd49fbe2ef40e7e877b6d38a43f2f529c00e06b57" gracePeriod=600
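The three liveness failures above land exactly 30 seconds apart (18:39:45, 18:40:15, 18:40:45), and the kubelet restarts the container on the third consecutive failure; that timing is consistent with periodSeconds=30 and the default failureThreshold=3, while gracePeriod=600 in the kill entry would come from the pod's 600-second termination grace period. A sketch of such a probe (the machine-config-daemon manifest itself is not in this log, so the values are inferred from the timestamps):

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
    	liveness := corev1.Probe{
    		ProbeHandler: corev1.ProbeHandler{
    			HTTPGet: &corev1.HTTPGetAction{
    				Host: "127.0.0.1", // probe URL from the log: http://127.0.0.1:8798/health
    				Path: "/health",
    				Port: intstr.FromInt(8798),
    			},
    		},
    		PeriodSeconds:    30, // failures are logged 30s apart
    		FailureThreshold: 3,  // the restart fires after the third consecutive failure
    	}
    	fmt.Printf("%+v\n", liveness)
    }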
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.009104 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-6jt7j"]
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.010391 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.033471 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-6jt7j"]
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.095942 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e0e92156-728b-4d31-a078-abb2fa34fe27-registry-certificates\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.096017 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e0e92156-728b-4d31-a078-abb2fa34fe27-bound-sa-token\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.096096 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.096155 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e0e92156-728b-4d31-a078-abb2fa34fe27-ca-trust-extracted\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.096192 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e0e92156-728b-4d31-a078-abb2fa34fe27-installation-pull-secrets\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.096276 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e0e92156-728b-4d31-a078-abb2fa34fe27-registry-tls\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.096466 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5h5q6\" (UniqueName: \"kubernetes.io/projected/e0e92156-728b-4d31-a078-abb2fa34fe27-kube-api-access-5h5q6\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.096564 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e0e92156-728b-4d31-a078-abb2fa34fe27-trusted-ca\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.122141 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.198049 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e0e92156-728b-4d31-a078-abb2fa34fe27-registry-certificates\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.198115 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e0e92156-728b-4d31-a078-abb2fa34fe27-bound-sa-token\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.198155 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e0e92156-728b-4d31-a078-abb2fa34fe27-ca-trust-extracted\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.198179 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e0e92156-728b-4d31-a078-abb2fa34fe27-installation-pull-secrets\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.198222 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e0e92156-728b-4d31-a078-abb2fa34fe27-registry-tls\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.198249 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5h5q6\" (UniqueName: \"kubernetes.io/projected/e0e92156-728b-4d31-a078-abb2fa34fe27-kube-api-access-5h5q6\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.198277 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e0e92156-728b-4d31-a078-abb2fa34fe27-trusted-ca\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.198982 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/e0e92156-728b-4d31-a078-abb2fa34fe27-ca-trust-extracted\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.199756 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/e0e92156-728b-4d31-a078-abb2fa34fe27-registry-certificates\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.199794 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/e0e92156-728b-4d31-a078-abb2fa34fe27-trusted-ca\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.204250 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/e0e92156-728b-4d31-a078-abb2fa34fe27-installation-pull-secrets\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.204306 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/e0e92156-728b-4d31-a078-abb2fa34fe27-registry-tls\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.214521 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/e0e92156-728b-4d31-a078-abb2fa34fe27-bound-sa-token\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.215460 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5h5q6\" (UniqueName: \"kubernetes.io/projected/e0e92156-728b-4d31-a078-abb2fa34fe27-kube-api-access-5h5q6\") pod \"image-registry-66df7c8f76-6jt7j\" (UID: \"e0e92156-728b-4d31-a078-abb2fa34fe27\") " pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
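The mount sequence above exercises five volume plugins for a single pod: configmap (registry-certificates, trusted-ca), projected (bound-sa-token, registry-tls, kube-api-access-5h5q6), empty-dir (ca-trust-extracted), secret (installation-pull-secrets), and csi (the hostpath-provisioner-backed PVC). A sketch of pod-spec volumes that would reconcile this way; the volume names come from the log, but the referenced ConfigMap/Secret/claim object names are assumptions, since the log records volume names rather than source objects:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    )

    func main() {
    	volumes := []corev1.Volume{
    		{Name: "registry-certificates", VolumeSource: corev1.VolumeSource{
    			ConfigMap: &corev1.ConfigMapVolumeSource{
    				LocalObjectReference: corev1.LocalObjectReference{Name: "image-registry-certificates"}, // assumed object name
    			},
    		}},
    		{Name: "ca-trust-extracted", VolumeSource: corev1.VolumeSource{
    			EmptyDir: &corev1.EmptyDirVolumeSource{},
    		}},
    		{Name: "installation-pull-secrets", VolumeSource: corev1.VolumeSource{
    			Secret: &corev1.SecretVolumeSource{SecretName: "installation-pull-secrets"}, // assumed object name
    		}},
    		// The PVC volume: the kubelet resolves the claim to the bound CSI volume
    		// kubevirt.io.hostpath-provisioner^pvc-657094db-... seen in the mount above.
    		{Name: "registry-storage", VolumeSource: corev1.VolumeSource{
    			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
    				ClaimName: "crc-image-registry-storage", // assumed; the log only shows the bound PV name
    			},
    		}},
    		// Projected volumes (bound-sa-token, registry-tls, kube-api-access-*) omitted for brevity.
    	}
    	fmt.Println(len(volumes), "volumes")
    }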
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.332707 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.500780 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-6jt7j"]
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.528627 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j" event={"ID":"e0e92156-728b-4d31-a078-abb2fa34fe27","Type":"ContainerStarted","Data":"191553211c021a7dd6dd89af4bf178fd504f3eaddb9124c6278a237cb5d699ef"}
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.531973 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="d926a7f2dcbb421ddc0b4cecd49fbe2ef40e7e877b6d38a43f2f529c00e06b57" exitCode=0
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.532031 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"d926a7f2dcbb421ddc0b4cecd49fbe2ef40e7e877b6d38a43f2f529c00e06b57"}
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.532066 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"89b3ad23204d4cf9cafe056f477e14abf29509dbae044fabcb7a013294397e92"}
Jan 28 18:40:46 crc kubenswrapper[4767]: I0128 18:40:46.532097 4767 scope.go:117] "RemoveContainer" containerID="34c635ea69766969f7f4b7e505bfdf39a502ec67ed786aa17cc6f9134cc202ab"
Jan 28 18:40:47 crc kubenswrapper[4767]: I0128 18:40:47.539321 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j" event={"ID":"e0e92156-728b-4d31-a078-abb2fa34fe27","Type":"ContainerStarted","Data":"8a170d7aa5dacd56e88100f5ef81d6bf76fd8be4d92bdde64a275d51c3277b56"}
Jan 28 18:40:47 crc kubenswrapper[4767]: I0128 18:40:47.539943 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:40:47 crc kubenswrapper[4767]: I0128 18:40:47.557114 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j" podStartSLOduration=2.557096316 podStartE2EDuration="2.557096316s" podCreationTimestamp="2026-01-28 18:40:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:40:47.555434403 +0000 UTC m=+653.519617287" watchObservedRunningTime="2026-01-28 18:40:47.557096316 +0000 UTC m=+653.521279190"
Jan 28 18:41:06 crc kubenswrapper[4767]: I0128 18:41:06.337409 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-6jt7j"
Jan 28 18:41:06 crc kubenswrapper[4767]: I0128 18:41:06.388874 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-77vtc"]
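The "Observed pod startup duration" entry above, and the one at 18:37:26 for redhat-operators-jfj5m, fit a simple relation that can be read straight off the logged values (the SLO duration excludes time spent pulling images):

    podStartSLOduration = podStartE2EDuration - (lastFinishedPulling - firstStartedPulling)

    redhat-operators-jfj5m:          5.126898732s - (18:37:25.688351315 - 18:37:23.084311020)
                                   = 5.126898732s - 2.604040295s
                                   = 2.522858437s   (the logged podStartSLOduration)

    image-registry-66df7c8f76-6jt7j: both pull timestamps are the zero time (0001-01-01),
                                   i.e. no image was pulled, so SLOduration = E2EDuration = 2.557096316s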
Jan 28 18:41:31 crc kubenswrapper[4767]: I0128 18:41:31.428285 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" podUID="b803ae3e-471d-4c86-a375-c4e7ab4403cd" containerName="registry" containerID="cri-o://b6503fecf8c37d9a30928ecd8028313e8c74ce6442f8f24626e9ffdc0194dda5" gracePeriod=30
Jan 28 18:41:31 crc kubenswrapper[4767]: I0128 18:41:31.775048 4767 generic.go:334] "Generic (PLEG): container finished" podID="b803ae3e-471d-4c86-a375-c4e7ab4403cd" containerID="b6503fecf8c37d9a30928ecd8028313e8c74ce6442f8f24626e9ffdc0194dda5" exitCode=0
Jan 28 18:41:31 crc kubenswrapper[4767]: I0128 18:41:31.775102 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" event={"ID":"b803ae3e-471d-4c86-a375-c4e7ab4403cd","Type":"ContainerDied","Data":"b6503fecf8c37d9a30928ecd8028313e8c74ce6442f8f24626e9ffdc0194dda5"}
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.486987 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.611462 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") "
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.611519 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-registry-tls\") pod \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") "
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.611545 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b803ae3e-471d-4c86-a375-c4e7ab4403cd-registry-certificates\") pod \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") "
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.611572 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b803ae3e-471d-4c86-a375-c4e7ab4403cd-ca-trust-extracted\") pod \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") "
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.611609 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b803ae3e-471d-4c86-a375-c4e7ab4403cd-installation-pull-secrets\") pod \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") "
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.611640 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vxfp\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-kube-api-access-4vxfp\") pod \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") "
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.611668 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b803ae3e-471d-4c86-a375-c4e7ab4403cd-trusted-ca\") pod \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") "
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.611730 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-bound-sa-token\") pod \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\" (UID: \"b803ae3e-471d-4c86-a375-c4e7ab4403cd\") "
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.613104 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b803ae3e-471d-4c86-a375-c4e7ab4403cd-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "b803ae3e-471d-4c86-a375-c4e7ab4403cd" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.613804 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b803ae3e-471d-4c86-a375-c4e7ab4403cd-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "b803ae3e-471d-4c86-a375-c4e7ab4403cd" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.619787 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "b803ae3e-471d-4c86-a375-c4e7ab4403cd" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.620305 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "b803ae3e-471d-4c86-a375-c4e7ab4403cd" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.620436 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b803ae3e-471d-4c86-a375-c4e7ab4403cd-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "b803ae3e-471d-4c86-a375-c4e7ab4403cd" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.621980 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-kube-api-access-4vxfp" (OuterVolumeSpecName: "kube-api-access-4vxfp") pod "b803ae3e-471d-4c86-a375-c4e7ab4403cd" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd"). InnerVolumeSpecName "kube-api-access-4vxfp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.622941 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "b803ae3e-471d-4c86-a375-c4e7ab4403cd" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue ""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.629765 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b803ae3e-471d-4c86-a375-c4e7ab4403cd-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "b803ae3e-471d-4c86-a375-c4e7ab4403cd" (UID: "b803ae3e-471d-4c86-a375-c4e7ab4403cd"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.714379 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vxfp\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-kube-api-access-4vxfp\") on node \"crc\" DevicePath \"\""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.714412 4767 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b803ae3e-471d-4c86-a375-c4e7ab4403cd-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.714422 4767 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-bound-sa-token\") on node \"crc\" DevicePath \"\""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.714431 4767 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/b803ae3e-471d-4c86-a375-c4e7ab4403cd-registry-tls\") on node \"crc\" DevicePath \"\""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.714439 4767 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/b803ae3e-471d-4c86-a375-c4e7ab4403cd-registry-certificates\") on node \"crc\" DevicePath \"\""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.714447 4767 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/b803ae3e-471d-4c86-a375-c4e7ab4403cd-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.714456 4767 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/b803ae3e-471d-4c86-a375-c4e7ab4403cd-installation-pull-secrets\") on node \"crc\" DevicePath \"\""
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.781840 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc"
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.781821 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-77vtc" event={"ID":"b803ae3e-471d-4c86-a375-c4e7ab4403cd","Type":"ContainerDied","Data":"83ffea9b9058f4523f754e6113f9aa2f6f7a30a6fd4e5d6a1c4605b6e1b36aa6"}
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.781976 4767 scope.go:117] "RemoveContainer" containerID="b6503fecf8c37d9a30928ecd8028313e8c74ce6442f8f24626e9ffdc0194dda5"
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.811418 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-77vtc"]
Jan 28 18:41:32 crc kubenswrapper[4767]: I0128 18:41:32.815365 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-77vtc"]
Jan 28 18:41:34 crc kubenswrapper[4767]: I0128 18:41:34.802315 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b803ae3e-471d-4c86-a375-c4e7ab4403cd" path="/var/lib/kubelet/pods/b803ae3e-471d-4c86-a375-c4e7ab4403cd/volumes"
Jan 28 18:41:44 crc kubenswrapper[4767]: I0128 18:41:44.993619 4767 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 28 18:41:57 crc kubenswrapper[4767]: I0128 18:41:57.311798 4767 scope.go:117] "RemoveContainer" containerID="eb756c74e0f6151d7175b5ab031e19bd11ae890304b54915dac5fc3be98d4ecf"
Jan 28 18:42:45 crc kubenswrapper[4767]: I0128 18:42:45.455549 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 18:42:45 crc kubenswrapper[4767]: I0128 18:42:45.456129 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.031241 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-njmvq"]
Jan 28 18:43:10 crc kubenswrapper[4767]: E0128 18:43:10.031953 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b803ae3e-471d-4c86-a375-c4e7ab4403cd" containerName="registry"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.031967 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b803ae3e-471d-4c86-a375-c4e7ab4403cd" containerName="registry"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.032059 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b803ae3e-471d-4c86-a375-c4e7ab4403cd" containerName="registry"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.032577 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-njmvq"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.048066 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-njmvq"]
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.052731 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-qrhz6"]
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.053517 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-qrhz6"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.055167 4767 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-wqsmk"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.055982 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.055980 4767 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-gmqgr"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.056018 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.063661 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-twwkv"]
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.064407 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-twwkv"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.068776 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-qrhz6"]
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.074854 4767 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-6szsh"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.085369 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-twwkv"]
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.088743 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p98hn\" (UniqueName: \"kubernetes.io/projected/d472b3ea-2a84-47e5-b9ee-56ddb86cfadf-kube-api-access-p98hn\") pod \"cert-manager-cainjector-cf98fcc89-qrhz6\" (UID: \"d472b3ea-2a84-47e5-b9ee-56ddb86cfadf\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-qrhz6"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.088921 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jkwcc\" (UniqueName: \"kubernetes.io/projected/52e7e92c-6b90-43b0-afaa-d90522a999e9-kube-api-access-jkwcc\") pod \"cert-manager-858654f9db-njmvq\" (UID: \"52e7e92c-6b90-43b0-afaa-d90522a999e9\") " pod="cert-manager/cert-manager-858654f9db-njmvq"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.089044 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg5hv\" (UniqueName: \"kubernetes.io/projected/d775633b-82b2-45f0-bf58-f3d45ee298a7-kube-api-access-qg5hv\") pod \"cert-manager-webhook-687f57d79b-twwkv\" (UID: \"d775633b-82b2-45f0-bf58-f3d45ee298a7\") " pod="cert-manager/cert-manager-webhook-687f57d79b-twwkv"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.190324 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg5hv\" (UniqueName: \"kubernetes.io/projected/d775633b-82b2-45f0-bf58-f3d45ee298a7-kube-api-access-qg5hv\") pod \"cert-manager-webhook-687f57d79b-twwkv\" (UID: \"d775633b-82b2-45f0-bf58-f3d45ee298a7\") " pod="cert-manager/cert-manager-webhook-687f57d79b-twwkv"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.190451 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p98hn\" (UniqueName: \"kubernetes.io/projected/d472b3ea-2a84-47e5-b9ee-56ddb86cfadf-kube-api-access-p98hn\") pod \"cert-manager-cainjector-cf98fcc89-qrhz6\" (UID: \"d472b3ea-2a84-47e5-b9ee-56ddb86cfadf\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-qrhz6"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.190482 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jkwcc\" (UniqueName: \"kubernetes.io/projected/52e7e92c-6b90-43b0-afaa-d90522a999e9-kube-api-access-jkwcc\") pod \"cert-manager-858654f9db-njmvq\" (UID: \"52e7e92c-6b90-43b0-afaa-d90522a999e9\") " pod="cert-manager/cert-manager-858654f9db-njmvq"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.210238 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jkwcc\" (UniqueName: \"kubernetes.io/projected/52e7e92c-6b90-43b0-afaa-d90522a999e9-kube-api-access-jkwcc\") pod \"cert-manager-858654f9db-njmvq\" (UID: \"52e7e92c-6b90-43b0-afaa-d90522a999e9\") " pod="cert-manager/cert-manager-858654f9db-njmvq"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.212677 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qg5hv\" (UniqueName: \"kubernetes.io/projected/d775633b-82b2-45f0-bf58-f3d45ee298a7-kube-api-access-qg5hv\") pod \"cert-manager-webhook-687f57d79b-twwkv\" (UID: \"d775633b-82b2-45f0-bf58-f3d45ee298a7\") " pod="cert-manager/cert-manager-webhook-687f57d79b-twwkv"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.215024 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p98hn\" (UniqueName: \"kubernetes.io/projected/d472b3ea-2a84-47e5-b9ee-56ddb86cfadf-kube-api-access-p98hn\") pod \"cert-manager-cainjector-cf98fcc89-qrhz6\" (UID: \"d472b3ea-2a84-47e5-b9ee-56ddb86cfadf\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-qrhz6"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.351135 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-njmvq"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.370429 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-qrhz6"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.386024 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-twwkv"
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.644610 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-twwkv"]
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.656554 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 28 18:43:10 crc kubenswrapper[4767]: W0128 18:43:10.811832 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd472b3ea_2a84_47e5_b9ee_56ddb86cfadf.slice/crio-c8f420527f2cb6bcddf87d345b4764372f1982ee4330833e1e0c4b5c46a91d91 WatchSource:0}: Error finding container c8f420527f2cb6bcddf87d345b4764372f1982ee4330833e1e0c4b5c46a91d91: Status 404 returned error can't find the container with id c8f420527f2cb6bcddf87d345b4764372f1982ee4330833e1e0c4b5c46a91d91
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.815667 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-njmvq"]
Jan 28 18:43:10 crc kubenswrapper[4767]: I0128 18:43:10.816004 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-qrhz6"]
Jan 28 18:43:11 crc kubenswrapper[4767]: I0128 18:43:11.322601 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-qrhz6" event={"ID":"d472b3ea-2a84-47e5-b9ee-56ddb86cfadf","Type":"ContainerStarted","Data":"c8f420527f2cb6bcddf87d345b4764372f1982ee4330833e1e0c4b5c46a91d91"}
Jan 28 18:43:11 crc kubenswrapper[4767]: I0128 18:43:11.324810 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-njmvq" event={"ID":"52e7e92c-6b90-43b0-afaa-d90522a999e9","Type":"ContainerStarted","Data":"1b203fcd3cd5500ab5918100cdc90833dae082bd703f8706ddd08897e953d263"}
Jan 28 18:43:11 crc kubenswrapper[4767]: I0128 18:43:11.325557 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-twwkv" event={"ID":"d775633b-82b2-45f0-bf58-f3d45ee298a7","Type":"ContainerStarted","Data":"143c1e9d4f2aad76167749af12749f7fe84f9087dffac922c34572fc8b81b2a6"}
Jan 28 18:43:15 crc kubenswrapper[4767]: I0128 18:43:15.351488 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-twwkv" event={"ID":"d775633b-82b2-45f0-bf58-f3d45ee298a7","Type":"ContainerStarted","Data":"5991799d44e8e69c83e49789eee94ebf917a9b314ed9e8c9eb088d230c40c854"}
Jan 28 18:43:15 crc kubenswrapper[4767]: I0128 18:43:15.352057 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-twwkv"
Jan 28 18:43:15 crc kubenswrapper[4767]: I0128 18:43:15.369154 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-twwkv" podStartSLOduration=1.417115194 podStartE2EDuration="5.36913315s" podCreationTimestamp="2026-01-28 18:43:10 +0000 UTC" firstStartedPulling="2026-01-28 18:43:10.656313495 +0000 UTC m=+796.620496369" lastFinishedPulling="2026-01-28 18:43:14.608331461 +0000 UTC m=+800.572514325" observedRunningTime="2026-01-28 18:43:15.366460876 +0000 UTC m=+801.330643750" watchObservedRunningTime="2026-01-28 18:43:15.36913315 +0000 UTC m=+801.333316024"
Jan 28 18:43:15 crc kubenswrapper[4767]: I0128 18:43:15.455223 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 18:43:15 crc kubenswrapper[4767]: I0128 18:43:15.455287 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 18:43:17 crc kubenswrapper[4767]: I0128 18:43:17.363643 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-qrhz6" event={"ID":"d472b3ea-2a84-47e5-b9ee-56ddb86cfadf","Type":"ContainerStarted","Data":"7d7dc498ab23b9f16af57f3d8e698dd5bd212baa169857bb57fe1f106ad1b496"}
Jan 28 18:43:17 crc kubenswrapper[4767]: I0128 18:43:17.367591 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-njmvq" event={"ID":"52e7e92c-6b90-43b0-afaa-d90522a999e9","Type":"ContainerStarted","Data":"2872b2eb86f10d94dd5288a1b221c0047c930f464d0aea7d3b218763ae49b9b2"}
Jan 28 18:43:17 crc kubenswrapper[4767]: I0128 18:43:17.389555 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-qrhz6" podStartSLOduration=1.191290876 podStartE2EDuration="7.389522427s" podCreationTimestamp="2026-01-28 18:43:10 +0000 UTC" firstStartedPulling="2026-01-28 18:43:10.816265737 +0000 UTC m=+796.780448611" lastFinishedPulling="2026-01-28 18:43:17.014497288 +0000 UTC m=+802.978680162" observedRunningTime="2026-01-28 18:43:17.386081079 +0000 UTC m=+803.350263953" watchObservedRunningTime="2026-01-28 18:43:17.389522427 +0000 UTC m=+803.353705301"
Jan 28 18:43:17 crc kubenswrapper[4767]: I0128 18:43:17.421462 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-njmvq" podStartSLOduration=1.197980377 podStartE2EDuration="7.421429613s" podCreationTimestamp="2026-01-28 18:43:10 +0000 UTC" firstStartedPulling="2026-01-28 18:43:10.796731671 +0000 UTC m=+796.760914545" lastFinishedPulling="2026-01-28 18:43:17.020180897 +0000 UTC m=+802.984363781" observedRunningTime="2026-01-28 18:43:17.417369015 +0000 UTC m=+803.381551909" watchObservedRunningTime="2026-01-28 18:43:17.421429613 +0000 UTC m=+803.385612487"
Jan 28 18:43:20 crc kubenswrapper[4767]: I0128 18:43:20.882004 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-twwkv"
Jan 28 18:43:20 crc kubenswrapper[4767]: I0128 18:43:20.909631 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mwmbk"]
Jan 28 18:43:20 crc kubenswrapper[4767]: I0128 18:43:20.909992 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovn-controller" containerID="cri-o://3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8" gracePeriod=30
Jan 28 18:43:20 crc kubenswrapper[4767]: I0128 18:43:20.910102 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="kube-rbac-proxy-node" containerID="cri-o://61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2" gracePeriod=30
Jan 28 18:43:20 crc kubenswrapper[4767]: I0128 18:43:20.910077 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="sbdb" containerID="cri-o://a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d" gracePeriod=30
Jan 28 18:43:20 crc kubenswrapper[4767]: I0128 18:43:20.910185 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="nbdb" containerID="cri-o://86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46" gracePeriod=30
Jan 28 18:43:20 crc kubenswrapper[4767]: I0128 18:43:20.910238 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="northd" containerID="cri-o://fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889" gracePeriod=30
Jan 28 18:43:20 crc kubenswrapper[4767]: I0128 18:43:20.910161 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21" gracePeriod=30
Jan 28 18:43:20 crc kubenswrapper[4767]: I0128 18:43:20.910136 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovn-acl-logging" containerID="cri-o://8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919" gracePeriod=30
Jan 28 18:43:20 crc kubenswrapper[4767]: I0128 18:43:20.941836 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" containerID="cri-o://4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c" gracePeriod=30
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.854828 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovnkube-controller/2.log"
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.857075 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovn-acl-logging/0.log"
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.857543 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovn-controller/0.log"
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.857927 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk"
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.875465 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hjjlv_5a8e6ea7-4d55-4222-840b-c0383a9bc7da/kube-multus/1.log"
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.876119 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hjjlv_5a8e6ea7-4d55-4222-840b-c0383a9bc7da/kube-multus/0.log"
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.876166 4767 generic.go:334] "Generic (PLEG): container finished" podID="5a8e6ea7-4d55-4222-840b-c0383a9bc7da" containerID="64bb8fae529e99311f52941cdce4bc8b89a63b6c8a6bd18a21f0450a81d076cf" exitCode=2
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.876250 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hjjlv" event={"ID":"5a8e6ea7-4d55-4222-840b-c0383a9bc7da","Type":"ContainerDied","Data":"64bb8fae529e99311f52941cdce4bc8b89a63b6c8a6bd18a21f0450a81d076cf"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.876296 4767 scope.go:117] "RemoveContainer" containerID="4aecb4f95d31bdf5cb5a27c4bc7a1646edd0723e353558b4e242dbafcfad9aa1"
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.876802 4767 scope.go:117] "RemoveContainer" containerID="64bb8fae529e99311f52941cdce4bc8b89a63b6c8a6bd18a21f0450a81d076cf"
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.879346 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovnkube-controller/2.log"
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.882341 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovn-acl-logging/0.log"
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.882833 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-mwmbk_0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/ovn-controller/0.log"
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884143 4767 generic.go:334] "Generic (PLEG): container finished" podID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerID="4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c" exitCode=0
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884166 4767 generic.go:334] "Generic (PLEG): container finished" podID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerID="a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d" exitCode=0
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884176 4767 generic.go:334] "Generic (PLEG): container finished" podID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerID="86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46" exitCode=0
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884169 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884233 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884245 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884255 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884186 4767 generic.go:334] "Generic (PLEG): container finished" podID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerID="fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889" exitCode=0
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884276 4767 generic.go:334] "Generic (PLEG): container finished" podID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerID="c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21" exitCode=0
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884287 4767 generic.go:334] "Generic (PLEG): container finished" podID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerID="61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2" exitCode=0
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884293 4767 generic.go:334] "Generic (PLEG): container finished" podID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerID="8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919" exitCode=143
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884300 4767 generic.go:334] "Generic (PLEG): container finished" podID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerID="3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8" exitCode=143
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884294 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk"
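The exit codes in the teardown above decode by the usual 128+signal convention: ovn-acl-logging and ovn-controller exited 143 = 128 + 15, i.e. they were still running when SIGTERM took effect, while the containers that report exitCode=0 shut down cleanly within the 30-second grace period (the earlier multus exitCode=2 is an ordinary error exit, unrelated to the signal). A one-line check of the arithmetic:

    package main

    import (
    	"fmt"
    	"syscall"
    )

    func main() {
    	// A process killed by signal N conventionally exits with status 128+N.
    	fmt.Println(128 + int(syscall.SIGTERM)) // prints 143, the code logged for ovn-controller
    }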
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884312 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884321 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884330 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884341 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884347 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884353 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884358 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884363 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884368 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884372 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884378 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884383 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884390 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884396 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884402 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884407 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884412 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884417 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884422 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884426 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884431 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884436 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884441 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884448 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884457 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884464 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"}
Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884470 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d"}
Jan 28 18:43:21 crc
kubenswrapper[4767]: I0128 18:43:21.884476 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884482 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884488 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884495 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884501 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884506 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884511 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884518 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-mwmbk" event={"ID":"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf","Type":"ContainerDied","Data":"54c8264bb62cb6c7eb8784cd67fc571df58f43d61b15a28e7c3c17e684a4901b"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884526 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884532 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884537 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884542 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884548 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884553 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21"} Jan 28 18:43:21 crc 
kubenswrapper[4767]: I0128 18:43:21.884558 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884563 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884569 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.884574 4767 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c"} Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.912248 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hw9m5"] Jan 28 18:43:21 crc kubenswrapper[4767]: E0128 18:43:21.913517 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovn-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913536 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovn-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: E0128 18:43:21.913553 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913562 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 18:43:21 crc kubenswrapper[4767]: E0128 18:43:21.913574 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="kubecfg-setup" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913581 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="kubecfg-setup" Jan 28 18:43:21 crc kubenswrapper[4767]: E0128 18:43:21.913595 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913603 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: E0128 18:43:21.913612 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovn-acl-logging" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913619 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovn-acl-logging" Jan 28 18:43:21 crc kubenswrapper[4767]: E0128 18:43:21.913627 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="kube-rbac-proxy-node" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913636 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" 
containerName="kube-rbac-proxy-node" Jan 28 18:43:21 crc kubenswrapper[4767]: E0128 18:43:21.913648 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913655 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: E0128 18:43:21.913667 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="sbdb" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913674 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="sbdb" Jan 28 18:43:21 crc kubenswrapper[4767]: E0128 18:43:21.913685 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="nbdb" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913691 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="nbdb" Jan 28 18:43:21 crc kubenswrapper[4767]: E0128 18:43:21.913702 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913708 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: E0128 18:43:21.913716 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="northd" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913723 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="northd" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913838 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="kube-rbac-proxy-node" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913850 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="kube-rbac-proxy-ovn-metrics" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913857 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="nbdb" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913866 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913900 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="northd" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913909 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="sbdb" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913918 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913927 4767 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913941 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovn-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.913952 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovn-acl-logging" Jan 28 18:43:21 crc kubenswrapper[4767]: E0128 18:43:21.914070 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.914081 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.914196 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" containerName="ovnkube-controller" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.916024 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947158 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-var-lib-openvswitch\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947199 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-slash\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947237 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-etc-openvswitch\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947254 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-ovn\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947277 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-systemd\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947298 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-systemd-units\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947314 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" 
(UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-cni-netd\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947333 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-run-netns\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947354 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-log-socket\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947377 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzlbv\" (UniqueName: \"kubernetes.io/projected/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-kube-api-access-dzlbv\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947393 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-openvswitch\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947410 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovnkube-config\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947424 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-node-log\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947440 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-var-lib-cni-networks-ovn-kubernetes\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947474 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovn-node-metrics-cert\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947495 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-cni-bin\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947517 4767 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-env-overrides\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947538 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovnkube-script-lib\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947572 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-run-ovn-kubernetes\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947599 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-kubelet\") pod \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\" (UID: \"0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf\") " Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947861 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947902 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947923 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-slash" (OuterVolumeSpecName: "host-slash") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947949 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.947972 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.948107 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.948148 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.948152 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.948174 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.948256 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-node-log" (OuterVolumeSpecName: "node-log") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.948286 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.948457 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.948500 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.948553 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.948556 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-log-socket" (OuterVolumeSpecName: "log-socket") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.948575 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.948706 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.955047 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.955958 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-kube-api-access-dzlbv" (OuterVolumeSpecName: "kube-api-access-dzlbv") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). InnerVolumeSpecName "kube-api-access-dzlbv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:43:21 crc kubenswrapper[4767]: I0128 18:43:21.965520 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" (UID: "0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf"). 
InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.011660 4767 scope.go:117] "RemoveContainer" containerID="4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.030833 4767 scope.go:117] "RemoveContainer" containerID="250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.048866 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-kubelet\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.048932 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-slash\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049004 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-etc-openvswitch\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049028 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-systemd-units\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049049 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/19018b4a-fb7a-45cc-a6ae-689b76417638-ovnkube-script-lib\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049078 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-cni-bin\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049103 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-node-log\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049132 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049160 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-var-lib-openvswitch\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049184 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nq2xg\" (UniqueName: \"kubernetes.io/projected/19018b4a-fb7a-45cc-a6ae-689b76417638-kube-api-access-nq2xg\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049240 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-run-netns\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049303 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-run-ovn\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049328 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-cni-netd\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049351 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/19018b4a-fb7a-45cc-a6ae-689b76417638-ovn-node-metrics-cert\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049396 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-run-openvswitch\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049432 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-run-systemd\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049461 4767 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/19018b4a-fb7a-45cc-a6ae-689b76417638-env-overrides\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049481 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-log-socket\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049516 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/19018b4a-fb7a-45cc-a6ae-689b76417638-ovnkube-config\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049553 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-run-ovn-kubernetes\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049606 4767 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049627 4767 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049645 4767 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049659 4767 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-log-socket\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049670 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzlbv\" (UniqueName: \"kubernetes.io/projected/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-kube-api-access-dzlbv\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049681 4767 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049691 4767 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049701 4767 reconciler_common.go:293] "Volume detached for volume 
\"node-log\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-node-log\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049712 4767 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049758 4767 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049770 4767 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049781 4767 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049793 4767 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049802 4767 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049812 4767 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049821 4767 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049832 4767 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-host-slash\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049844 4767 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049854 4767 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.049863 4767 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.077473 4767 scope.go:117] "RemoveContainer" 
containerID="a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.097303 4767 scope.go:117] "RemoveContainer" containerID="86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.113391 4767 scope.go:117] "RemoveContainer" containerID="fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.126306 4767 scope.go:117] "RemoveContainer" containerID="c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.139825 4767 scope.go:117] "RemoveContainer" containerID="61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151162 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-etc-openvswitch\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151245 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-systemd-units\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151275 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/19018b4a-fb7a-45cc-a6ae-689b76417638-ovnkube-script-lib\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151310 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-cni-bin\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151339 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-node-log\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151368 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151391 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nq2xg\" (UniqueName: \"kubernetes.io/projected/19018b4a-fb7a-45cc-a6ae-689b76417638-kube-api-access-nq2xg\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 
18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151413 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-var-lib-openvswitch\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151413 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-systemd-units\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151440 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-run-netns\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151503 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-run-netns\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151517 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-run-ovn\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151550 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-var-lib-openvswitch\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151551 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-cni-netd\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151564 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-etc-openvswitch\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151612 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-run-openvswitch\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151579 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-cni-netd\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151624 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-run-ovn\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151579 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-cni-bin\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151564 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-node-log\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151589 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-run-openvswitch\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151719 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/19018b4a-fb7a-45cc-a6ae-689b76417638-ovn-node-metrics-cert\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151582 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151785 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-run-systemd\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151763 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-run-systemd\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.151968 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/19018b4a-fb7a-45cc-a6ae-689b76417638-env-overrides\") pod 
\"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.152042 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-log-socket\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.152076 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/19018b4a-fb7a-45cc-a6ae-689b76417638-ovnkube-config\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.152149 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-run-ovn-kubernetes\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.152174 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-kubelet\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.152196 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-slash\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.152308 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/19018b4a-fb7a-45cc-a6ae-689b76417638-ovnkube-script-lib\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.152421 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-slash\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.152510 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-run-ovn-kubernetes\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.152550 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-log-socket\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" 
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.152574 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/19018b4a-fb7a-45cc-a6ae-689b76417638-host-kubelet\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.152828 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/19018b4a-fb7a-45cc-a6ae-689b76417638-env-overrides\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.153415 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/19018b4a-fb7a-45cc-a6ae-689b76417638-ovnkube-config\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.155889 4767 scope.go:117] "RemoveContainer" containerID="8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.156166 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/19018b4a-fb7a-45cc-a6ae-689b76417638-ovn-node-metrics-cert\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.167557 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nq2xg\" (UniqueName: \"kubernetes.io/projected/19018b4a-fb7a-45cc-a6ae-689b76417638-kube-api-access-nq2xg\") pod \"ovnkube-node-hw9m5\" (UID: \"19018b4a-fb7a-45cc-a6ae-689b76417638\") " pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.200136 4767 scope.go:117] "RemoveContainer" containerID="3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.216846 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mwmbk"] Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.219472 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-mwmbk"] Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.228301 4767 scope.go:117] "RemoveContainer" containerID="6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.230152 4767 util.go:30] "No sandbox for pod can be found. 
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.256823 4767 scope.go:117] "RemoveContainer" containerID="4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"
Jan 28 18:43:22 crc kubenswrapper[4767]: E0128 18:43:22.257303 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c\": container with ID starting with 4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c not found: ID does not exist" containerID="4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.257334 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"} err="failed to get container status \"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c\": rpc error: code = NotFound desc = could not find container \"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c\": container with ID starting with 4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c not found: ID does not exist"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.257354 4767 scope.go:117] "RemoveContainer" containerID="250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"
Jan 28 18:43:22 crc kubenswrapper[4767]: E0128 18:43:22.257873 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\": container with ID starting with 250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64 not found: ID does not exist" containerID="250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.257931 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"} err="failed to get container status \"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\": rpc error: code = NotFound desc = could not find container \"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\": container with ID starting with 250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64 not found: ID does not exist"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.257974 4767 scope.go:117] "RemoveContainer" containerID="a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d"
Jan 28 18:43:22 crc kubenswrapper[4767]: E0128 18:43:22.258524 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\": container with ID starting with a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d not found: ID does not exist" containerID="a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.258555 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d"} err="failed to get container status \"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\": rpc error: code = NotFound desc = could not find container \"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\": container with ID starting with a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d not found: ID does not exist"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.258573 4767 scope.go:117] "RemoveContainer" containerID="86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46"
Jan 28 18:43:22 crc kubenswrapper[4767]: E0128 18:43:22.258970 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\": container with ID starting with 86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46 not found: ID does not exist" containerID="86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.259006 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46"} err="failed to get container status \"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\": rpc error: code = NotFound desc = could not find container \"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\": container with ID starting with 86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46 not found: ID does not exist"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.259027 4767 scope.go:117] "RemoveContainer" containerID="fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889"
Jan 28 18:43:22 crc kubenswrapper[4767]: E0128 18:43:22.260113 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\": container with ID starting with fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889 not found: ID does not exist" containerID="fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.260162 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889"} err="failed to get container status \"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\": rpc error: code = NotFound desc = could not find container \"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\": container with ID starting with fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889 not found: ID does not exist"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.260194 4767 scope.go:117] "RemoveContainer" containerID="c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21"
Jan 28 18:43:22 crc kubenswrapper[4767]: E0128 18:43:22.260526 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\": container with ID starting with c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21 not found: ID does not exist" containerID="c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.260550 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21"} err="failed to get container status \"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\": rpc error: code = NotFound desc = could not find container \"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\": container with ID starting with c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21 not found: ID does not exist"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.260565 4767 scope.go:117] "RemoveContainer" containerID="61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2"
Jan 28 18:43:22 crc kubenswrapper[4767]: E0128 18:43:22.260798 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\": container with ID starting with 61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2 not found: ID does not exist" containerID="61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.260819 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2"} err="failed to get container status \"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\": rpc error: code = NotFound desc = could not find container \"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\": container with ID starting with 61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2 not found: ID does not exist"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.260833 4767 scope.go:117] "RemoveContainer" containerID="8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919"
Jan 28 18:43:22 crc kubenswrapper[4767]: E0128 18:43:22.261153 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\": container with ID starting with 8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919 not found: ID does not exist" containerID="8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.261172 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919"} err="failed to get container status \"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\": rpc error: code = NotFound desc = could not find container \"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\": container with ID starting with 8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919 not found: ID does not exist"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.261186 4767 scope.go:117] "RemoveContainer" containerID="3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8"
Jan 28 18:43:22 crc kubenswrapper[4767]: E0128 18:43:22.261586 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\": container with ID starting with 3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8 not found: ID does not exist" containerID="3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.261607 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8"} err="failed to get container status \"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\": rpc error: code = NotFound desc = could not find container \"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\": container with ID starting with 3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8 not found: ID does not exist"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.261622 4767 scope.go:117] "RemoveContainer" containerID="6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c"
Jan 28 18:43:22 crc kubenswrapper[4767]: W0128 18:43:22.261696 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19018b4a_fb7a_45cc_a6ae_689b76417638.slice/crio-4a7145c2b194c976695e5f7550e5325570e3ab5328801cd13b068c2c7a4d7b71 WatchSource:0}: Error finding container 4a7145c2b194c976695e5f7550e5325570e3ab5328801cd13b068c2c7a4d7b71: Status 404 returned error can't find the container with id 4a7145c2b194c976695e5f7550e5325570e3ab5328801cd13b068c2c7a4d7b71
Jan 28 18:43:22 crc kubenswrapper[4767]: E0128 18:43:22.261786 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\": container with ID starting with 6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c not found: ID does not exist" containerID="6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.261845 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c"} err="failed to get container status \"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\": rpc error: code = NotFound desc = could not find container \"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\": container with ID starting with 6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c not found: ID does not exist"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.261869 4767 scope.go:117] "RemoveContainer" containerID="4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.262094 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"} err="failed to get container status \"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c\": rpc error: code = NotFound desc = could not find container \"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c\": container with ID starting with 4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c not found: ID does not exist"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.262115 4767 scope.go:117] "RemoveContainer" containerID="250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"
Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.262445 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"} err="failed to get container status \"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\": rpc error: code = NotFound desc = could not find container \"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\": container with ID starting with 250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64 not found: ID does not exist"
containerID={"Type":"cri-o","ID":"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"} err="failed to get container status \"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\": rpc error: code = NotFound desc = could not find container \"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\": container with ID starting with 250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.262462 4767 scope.go:117] "RemoveContainer" containerID="a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.262675 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d"} err="failed to get container status \"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\": rpc error: code = NotFound desc = could not find container \"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\": container with ID starting with a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.262693 4767 scope.go:117] "RemoveContainer" containerID="86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.263040 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46"} err="failed to get container status \"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\": rpc error: code = NotFound desc = could not find container \"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\": container with ID starting with 86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.263056 4767 scope.go:117] "RemoveContainer" containerID="fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.263431 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889"} err="failed to get container status \"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\": rpc error: code = NotFound desc = could not find container \"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\": container with ID starting with fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.263451 4767 scope.go:117] "RemoveContainer" containerID="c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.263631 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21"} err="failed to get container status \"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\": rpc error: code = NotFound desc = could not find container \"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\": container with ID starting with c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21 not found: ID does not exist" Jan 
28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.263649 4767 scope.go:117] "RemoveContainer" containerID="61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.263873 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2"} err="failed to get container status \"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\": rpc error: code = NotFound desc = could not find container \"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\": container with ID starting with 61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.263903 4767 scope.go:117] "RemoveContainer" containerID="8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.264356 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919"} err="failed to get container status \"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\": rpc error: code = NotFound desc = could not find container \"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\": container with ID starting with 8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.264378 4767 scope.go:117] "RemoveContainer" containerID="3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.264606 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8"} err="failed to get container status \"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\": rpc error: code = NotFound desc = could not find container \"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\": container with ID starting with 3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.264639 4767 scope.go:117] "RemoveContainer" containerID="6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.264878 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c"} err="failed to get container status \"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\": rpc error: code = NotFound desc = could not find container \"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\": container with ID starting with 6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.264900 4767 scope.go:117] "RemoveContainer" containerID="4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.265141 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"} err="failed to get container status 
\"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c\": rpc error: code = NotFound desc = could not find container \"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c\": container with ID starting with 4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.265184 4767 scope.go:117] "RemoveContainer" containerID="250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.265454 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"} err="failed to get container status \"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\": rpc error: code = NotFound desc = could not find container \"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\": container with ID starting with 250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.265473 4767 scope.go:117] "RemoveContainer" containerID="a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.265667 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d"} err="failed to get container status \"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\": rpc error: code = NotFound desc = could not find container \"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\": container with ID starting with a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.265686 4767 scope.go:117] "RemoveContainer" containerID="86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.265878 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46"} err="failed to get container status \"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\": rpc error: code = NotFound desc = could not find container \"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\": container with ID starting with 86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.265909 4767 scope.go:117] "RemoveContainer" containerID="fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.266110 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889"} err="failed to get container status \"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\": rpc error: code = NotFound desc = could not find container \"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\": container with ID starting with fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.266135 4767 scope.go:117] "RemoveContainer" 
containerID="c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.267062 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21"} err="failed to get container status \"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\": rpc error: code = NotFound desc = could not find container \"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\": container with ID starting with c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.267118 4767 scope.go:117] "RemoveContainer" containerID="61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.267523 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2"} err="failed to get container status \"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\": rpc error: code = NotFound desc = could not find container \"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\": container with ID starting with 61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.267573 4767 scope.go:117] "RemoveContainer" containerID="8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.267842 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919"} err="failed to get container status \"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\": rpc error: code = NotFound desc = could not find container \"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\": container with ID starting with 8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.267886 4767 scope.go:117] "RemoveContainer" containerID="3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.268291 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8"} err="failed to get container status \"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\": rpc error: code = NotFound desc = could not find container \"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\": container with ID starting with 3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.268360 4767 scope.go:117] "RemoveContainer" containerID="6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.268643 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c"} err="failed to get container status \"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\": rpc error: code = NotFound desc = could not find 
container \"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\": container with ID starting with 6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.268668 4767 scope.go:117] "RemoveContainer" containerID="4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.268918 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c"} err="failed to get container status \"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c\": rpc error: code = NotFound desc = could not find container \"4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c\": container with ID starting with 4bb52cb755721ffe7bd26934a68155d3cea24a33f5f394773783b49c8d5c452c not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.268942 4767 scope.go:117] "RemoveContainer" containerID="250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.269167 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64"} err="failed to get container status \"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\": rpc error: code = NotFound desc = could not find container \"250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64\": container with ID starting with 250cf1b6b2d2e1a5284609f537e5301c93b8f7e4c927794af85cc8cdaa98cd64 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.269191 4767 scope.go:117] "RemoveContainer" containerID="a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.269400 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d"} err="failed to get container status \"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\": rpc error: code = NotFound desc = could not find container \"a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d\": container with ID starting with a023001f94b3fb465ac98e5be742d96bbc2744b369353b37f7cb2ceff4b3da0d not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.269423 4767 scope.go:117] "RemoveContainer" containerID="86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.269700 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46"} err="failed to get container status \"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\": rpc error: code = NotFound desc = could not find container \"86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46\": container with ID starting with 86c632a0856a1d19ac80788d3cd3e48a05aa13df7b507e28a46b832277cbbc46 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.269722 4767 scope.go:117] "RemoveContainer" containerID="fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.269951 4767 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889"} err="failed to get container status \"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\": rpc error: code = NotFound desc = could not find container \"fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889\": container with ID starting with fad16b0522e0d96375c1d9d2c73582c2c23e2c1feb23a1ce4b6f138140df8889 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.269971 4767 scope.go:117] "RemoveContainer" containerID="c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.270189 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21"} err="failed to get container status \"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\": rpc error: code = NotFound desc = could not find container \"c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21\": container with ID starting with c01f5e27bbdf2a6a0cf49c769bc5f5d1bd78765e87adeff938caae39fe34ff21 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.270229 4767 scope.go:117] "RemoveContainer" containerID="61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.270482 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2"} err="failed to get container status \"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\": rpc error: code = NotFound desc = could not find container \"61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2\": container with ID starting with 61316b0bf23a0373b3ebffbb3c6e7817bab3e2a17a4f5695cdc58c2c06f5bfc2 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.270508 4767 scope.go:117] "RemoveContainer" containerID="8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.270832 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919"} err="failed to get container status \"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\": rpc error: code = NotFound desc = could not find container \"8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919\": container with ID starting with 8a44be5fc5fb03266aa327ccd2036fd1d1b80a46017ff1b8c642925028773919 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.270856 4767 scope.go:117] "RemoveContainer" containerID="3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.271084 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8"} err="failed to get container status \"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\": rpc error: code = NotFound desc = could not find container \"3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8\": container with ID starting with 
3324621ea85776ace814d5c62af9fabb499ee15a81690ace135076eafefc60e8 not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.271108 4767 scope.go:117] "RemoveContainer" containerID="6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.271365 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c"} err="failed to get container status \"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\": rpc error: code = NotFound desc = could not find container \"6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c\": container with ID starting with 6ec73f72dfbd5f3c450bbabb8902a656aa34c7473b60d6168a8313eaafa9dc5c not found: ID does not exist" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.807276 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf" path="/var/lib/kubelet/pods/0df1a8cc-2906-4ccb-88c3-5580dcb5ecdf/volumes" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.892616 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-hjjlv_5a8e6ea7-4d55-4222-840b-c0383a9bc7da/kube-multus/1.log" Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.892717 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-hjjlv" event={"ID":"5a8e6ea7-4d55-4222-840b-c0383a9bc7da","Type":"ContainerStarted","Data":"a8b1a722be1c698d39c4de79cb5bcba9e09178de2953e450ceb81c5e302b1f8e"} Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.895276 4767 generic.go:334] "Generic (PLEG): container finished" podID="19018b4a-fb7a-45cc-a6ae-689b76417638" containerID="c9c48196b0360ce403b7106e1d60b7d6e14f241a840947c7965ee0cb6b928e6d" exitCode=0 Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.895360 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" event={"ID":"19018b4a-fb7a-45cc-a6ae-689b76417638","Type":"ContainerDied","Data":"c9c48196b0360ce403b7106e1d60b7d6e14f241a840947c7965ee0cb6b928e6d"} Jan 28 18:43:22 crc kubenswrapper[4767]: I0128 18:43:22.895396 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" event={"ID":"19018b4a-fb7a-45cc-a6ae-689b76417638","Type":"ContainerStarted","Data":"4a7145c2b194c976695e5f7550e5325570e3ab5328801cd13b068c2c7a4d7b71"} Jan 28 18:43:23 crc kubenswrapper[4767]: I0128 18:43:23.904398 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" event={"ID":"19018b4a-fb7a-45cc-a6ae-689b76417638","Type":"ContainerStarted","Data":"2756381fdcac3888601676c193c69999e398d328911c7b853740d4aec1162c3c"} Jan 28 18:43:23 crc kubenswrapper[4767]: I0128 18:43:23.904679 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" event={"ID":"19018b4a-fb7a-45cc-a6ae-689b76417638","Type":"ContainerStarted","Data":"8f96a94c673498c780ee4441fb738200c31004e62ea13db37485acfebf500d36"} Jan 28 18:43:23 crc kubenswrapper[4767]: I0128 18:43:23.904689 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" event={"ID":"19018b4a-fb7a-45cc-a6ae-689b76417638","Type":"ContainerStarted","Data":"0c6a1222ce4650aa35fe5479d1d6c13a6f4bd9d1a14f758cc4723521425983e2"} Jan 28 18:43:23 crc kubenswrapper[4767]: I0128 18:43:23.904699 
4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" event={"ID":"19018b4a-fb7a-45cc-a6ae-689b76417638","Type":"ContainerStarted","Data":"521e29c652a9c7e93f026562010b7c3bdb085d9d2b3100a3103558ee68295889"} Jan 28 18:43:24 crc kubenswrapper[4767]: I0128 18:43:24.912340 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" event={"ID":"19018b4a-fb7a-45cc-a6ae-689b76417638","Type":"ContainerStarted","Data":"5b92774e103a2eb5ca3ebb2d1dceabe495aa2c2a11ec82523cc764e3efa48051"} Jan 28 18:43:24 crc kubenswrapper[4767]: I0128 18:43:24.912620 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" event={"ID":"19018b4a-fb7a-45cc-a6ae-689b76417638","Type":"ContainerStarted","Data":"fe7d7833eb02ea8c972e4c1c8d5d16a5f052363e5660a8d4c7d6748bd5009cdd"} Jan 28 18:43:26 crc kubenswrapper[4767]: I0128 18:43:26.925792 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" event={"ID":"19018b4a-fb7a-45cc-a6ae-689b76417638","Type":"ContainerStarted","Data":"eb5491058f6fb07b5562eab8997ab7c549fefaae905dce1118848a4dbbc507e7"} Jan 28 18:43:30 crc kubenswrapper[4767]: I0128 18:43:30.951831 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" event={"ID":"19018b4a-fb7a-45cc-a6ae-689b76417638","Type":"ContainerStarted","Data":"abf681f7f48aa898f12477cc471688fbf320c2e21265245f76432b8d69678e92"} Jan 28 18:43:30 crc kubenswrapper[4767]: I0128 18:43:30.952375 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:30 crc kubenswrapper[4767]: I0128 18:43:30.983256 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" podStartSLOduration=9.983239102 podStartE2EDuration="9.983239102s" podCreationTimestamp="2026-01-28 18:43:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:43:30.983008135 +0000 UTC m=+816.947191019" watchObservedRunningTime="2026-01-28 18:43:30.983239102 +0000 UTC m=+816.947421986" Jan 28 18:43:30 crc kubenswrapper[4767]: I0128 18:43:30.986585 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:31 crc kubenswrapper[4767]: I0128 18:43:31.956687 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:31 crc kubenswrapper[4767]: I0128 18:43:31.956993 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:32 crc kubenswrapper[4767]: I0128 18:43:32.004633 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:45 crc kubenswrapper[4767]: I0128 18:43:45.455033 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:43:45 crc kubenswrapper[4767]: I0128 18:43:45.455573 4767 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:43:45 crc kubenswrapper[4767]: I0128 18:43:45.455631 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:43:45 crc kubenswrapper[4767]: I0128 18:43:45.456158 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"89b3ad23204d4cf9cafe056f477e14abf29509dbae044fabcb7a013294397e92"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 18:43:45 crc kubenswrapper[4767]: I0128 18:43:45.456227 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://89b3ad23204d4cf9cafe056f477e14abf29509dbae044fabcb7a013294397e92" gracePeriod=600 Jan 28 18:43:46 crc kubenswrapper[4767]: I0128 18:43:46.024512 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="89b3ad23204d4cf9cafe056f477e14abf29509dbae044fabcb7a013294397e92" exitCode=0 Jan 28 18:43:46 crc kubenswrapper[4767]: I0128 18:43:46.024577 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"89b3ad23204d4cf9cafe056f477e14abf29509dbae044fabcb7a013294397e92"} Jan 28 18:43:46 crc kubenswrapper[4767]: I0128 18:43:46.024855 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"3b61dfadd6f0461bd4bf84451309ddd45bb81f3a524162955ded57c40d87733d"} Jan 28 18:43:46 crc kubenswrapper[4767]: I0128 18:43:46.024876 4767 scope.go:117] "RemoveContainer" containerID="d926a7f2dcbb421ddc0b4cecd49fbe2ef40e7e877b6d38a43f2f529c00e06b57" Jan 28 18:43:52 crc kubenswrapper[4767]: I0128 18:43:52.251743 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hw9m5" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.626732 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh"] Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.628383 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.630943 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.635143 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh"] Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.748437 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cedc3704-963c-4161-b9c9-cf2b6d8ea555-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh\" (UID: \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.748849 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cedc3704-963c-4161-b9c9-cf2b6d8ea555-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh\" (UID: \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.749013 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vkmsz\" (UniqueName: \"kubernetes.io/projected/cedc3704-963c-4161-b9c9-cf2b6d8ea555-kube-api-access-vkmsz\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh\" (UID: \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.850552 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cedc3704-963c-4161-b9c9-cf2b6d8ea555-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh\" (UID: \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.850868 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vkmsz\" (UniqueName: \"kubernetes.io/projected/cedc3704-963c-4161-b9c9-cf2b6d8ea555-kube-api-access-vkmsz\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh\" (UID: \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.850976 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cedc3704-963c-4161-b9c9-cf2b6d8ea555-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh\" (UID: \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.851161 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/cedc3704-963c-4161-b9c9-cf2b6d8ea555-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh\" (UID: \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.851442 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cedc3704-963c-4161-b9c9-cf2b6d8ea555-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh\" (UID: \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.872245 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vkmsz\" (UniqueName: \"kubernetes.io/projected/cedc3704-963c-4161-b9c9-cf2b6d8ea555-kube-api-access-vkmsz\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh\" (UID: \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.953313 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 18:43:54 crc kubenswrapper[4767]: I0128 18:43:54.961164 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" Jan 28 18:43:55 crc kubenswrapper[4767]: I0128 18:43:55.143055 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh"] Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.079009 4767 generic.go:334] "Generic (PLEG): container finished" podID="cedc3704-963c-4161-b9c9-cf2b6d8ea555" containerID="136270958bef0bb2d16f6c4ded886100f118be35c68081e889eef243d6578236" exitCode=0 Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.079116 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" event={"ID":"cedc3704-963c-4161-b9c9-cf2b6d8ea555","Type":"ContainerDied","Data":"136270958bef0bb2d16f6c4ded886100f118be35c68081e889eef243d6578236"} Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.079365 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" event={"ID":"cedc3704-963c-4161-b9c9-cf2b6d8ea555","Type":"ContainerStarted","Data":"330c57c7efcd20b536007c3c14470e165c853e980b1d91cbe99df019a9deaa70"} Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.806089 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wmms8"] Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.807749 4767 util.go:30] "No sandbox for pod can be found. 
Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.819423 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wmms8"]
Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.878472 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2k7n\" (UniqueName: \"kubernetes.io/projected/79b4937a-86f4-41da-83c6-8c8c9164552c-kube-api-access-j2k7n\") pod \"redhat-operators-wmms8\" (UID: \"79b4937a-86f4-41da-83c6-8c8c9164552c\") " pod="openshift-marketplace/redhat-operators-wmms8"
Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.878591 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79b4937a-86f4-41da-83c6-8c8c9164552c-catalog-content\") pod \"redhat-operators-wmms8\" (UID: \"79b4937a-86f4-41da-83c6-8c8c9164552c\") " pod="openshift-marketplace/redhat-operators-wmms8"
Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.878636 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79b4937a-86f4-41da-83c6-8c8c9164552c-utilities\") pod \"redhat-operators-wmms8\" (UID: \"79b4937a-86f4-41da-83c6-8c8c9164552c\") " pod="openshift-marketplace/redhat-operators-wmms8"
Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.979486 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2k7n\" (UniqueName: \"kubernetes.io/projected/79b4937a-86f4-41da-83c6-8c8c9164552c-kube-api-access-j2k7n\") pod \"redhat-operators-wmms8\" (UID: \"79b4937a-86f4-41da-83c6-8c8c9164552c\") " pod="openshift-marketplace/redhat-operators-wmms8"
Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.979634 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79b4937a-86f4-41da-83c6-8c8c9164552c-catalog-content\") pod \"redhat-operators-wmms8\" (UID: \"79b4937a-86f4-41da-83c6-8c8c9164552c\") " pod="openshift-marketplace/redhat-operators-wmms8"
Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.979670 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79b4937a-86f4-41da-83c6-8c8c9164552c-utilities\") pod \"redhat-operators-wmms8\" (UID: \"79b4937a-86f4-41da-83c6-8c8c9164552c\") " pod="openshift-marketplace/redhat-operators-wmms8"
Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.980508 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79b4937a-86f4-41da-83c6-8c8c9164552c-utilities\") pod \"redhat-operators-wmms8\" (UID: \"79b4937a-86f4-41da-83c6-8c8c9164552c\") " pod="openshift-marketplace/redhat-operators-wmms8"
Jan 28 18:43:56 crc kubenswrapper[4767]: I0128 18:43:56.980623 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79b4937a-86f4-41da-83c6-8c8c9164552c-catalog-content\") pod \"redhat-operators-wmms8\" (UID: \"79b4937a-86f4-41da-83c6-8c8c9164552c\") " pod="openshift-marketplace/redhat-operators-wmms8"
Jan 28 18:43:57 crc kubenswrapper[4767]: I0128 18:43:57.015101 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2k7n\" (UniqueName: \"kubernetes.io/projected/79b4937a-86f4-41da-83c6-8c8c9164552c-kube-api-access-j2k7n\") pod \"redhat-operators-wmms8\" (UID: \"79b4937a-86f4-41da-83c6-8c8c9164552c\") " pod="openshift-marketplace/redhat-operators-wmms8"
Jan 28 18:43:57 crc kubenswrapper[4767]: I0128 18:43:57.121250 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wmms8"
Jan 28 18:43:57 crc kubenswrapper[4767]: I0128 18:43:57.342611 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wmms8"]
Jan 28 18:43:57 crc kubenswrapper[4767]: W0128 18:43:57.368945 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod79b4937a_86f4_41da_83c6_8c8c9164552c.slice/crio-cc5cc9ed6d805b222cf97dde3fd52232d33e862eecd18b97a970096354c43056 WatchSource:0}: Error finding container cc5cc9ed6d805b222cf97dde3fd52232d33e862eecd18b97a970096354c43056: Status 404 returned error can't find the container with id cc5cc9ed6d805b222cf97dde3fd52232d33e862eecd18b97a970096354c43056
Jan 28 18:43:58 crc kubenswrapper[4767]: I0128 18:43:58.090706 4767 generic.go:334] "Generic (PLEG): container finished" podID="cedc3704-963c-4161-b9c9-cf2b6d8ea555" containerID="4fd32f1b9a567777ccf1d38bf445759107ad9776b90c9304525c6d91156dadbf" exitCode=0
Jan 28 18:43:58 crc kubenswrapper[4767]: I0128 18:43:58.090793 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" event={"ID":"cedc3704-963c-4161-b9c9-cf2b6d8ea555","Type":"ContainerDied","Data":"4fd32f1b9a567777ccf1d38bf445759107ad9776b90c9304525c6d91156dadbf"}
Jan 28 18:43:58 crc kubenswrapper[4767]: I0128 18:43:58.092840 4767 generic.go:334] "Generic (PLEG): container finished" podID="79b4937a-86f4-41da-83c6-8c8c9164552c" containerID="1c945cca14604c4e63aa25a19a6735a29b2a5a9cfe224d8b5ad2458b369fc682" exitCode=0
Jan 28 18:43:58 crc kubenswrapper[4767]: I0128 18:43:58.092898 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmms8" event={"ID":"79b4937a-86f4-41da-83c6-8c8c9164552c","Type":"ContainerDied","Data":"1c945cca14604c4e63aa25a19a6735a29b2a5a9cfe224d8b5ad2458b369fc682"}
Jan 28 18:43:58 crc kubenswrapper[4767]: I0128 18:43:58.092931 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmms8" event={"ID":"79b4937a-86f4-41da-83c6-8c8c9164552c","Type":"ContainerStarted","Data":"cc5cc9ed6d805b222cf97dde3fd52232d33e862eecd18b97a970096354c43056"}
Jan 28 18:43:59 crc kubenswrapper[4767]: I0128 18:43:59.101690 4767 generic.go:334] "Generic (PLEG): container finished" podID="cedc3704-963c-4161-b9c9-cf2b6d8ea555" containerID="43edd25a67051a423f6e57771ebee6e884157ac2e24da902efdeff4d79ac14d8" exitCode=0
Jan 28 18:43:59 crc kubenswrapper[4767]: I0128 18:43:59.101740 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" event={"ID":"cedc3704-963c-4161-b9c9-cf2b6d8ea555","Type":"ContainerDied","Data":"43edd25a67051a423f6e57771ebee6e884157ac2e24da902efdeff4d79ac14d8"}
Jan 28 18:43:59 crc kubenswrapper[4767]: I0128 18:43:59.104419 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmms8" event={"ID":"79b4937a-86f4-41da-83c6-8c8c9164552c","Type":"ContainerStarted","Data":"e92e30f5da756a9536e7ae3611b6eb5add59db197cbe3b75d4e21cb42359f2e7"}
Jan 28 18:44:00 crc kubenswrapper[4767]: I0128 18:44:00.115924 4767 generic.go:334] "Generic (PLEG): container finished" podID="79b4937a-86f4-41da-83c6-8c8c9164552c" containerID="e92e30f5da756a9536e7ae3611b6eb5add59db197cbe3b75d4e21cb42359f2e7" exitCode=0
Jan 28 18:44:00 crc kubenswrapper[4767]: I0128 18:44:00.116115 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmms8" event={"ID":"79b4937a-86f4-41da-83c6-8c8c9164552c","Type":"ContainerDied","Data":"e92e30f5da756a9536e7ae3611b6eb5add59db197cbe3b75d4e21cb42359f2e7"}
Jan 28 18:44:00 crc kubenswrapper[4767]: I0128 18:44:00.379297 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh"
Jan 28 18:44:00 crc kubenswrapper[4767]: I0128 18:44:00.536281 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cedc3704-963c-4161-b9c9-cf2b6d8ea555-bundle\") pod \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\" (UID: \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\") "
Jan 28 18:44:00 crc kubenswrapper[4767]: I0128 18:44:00.536457 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cedc3704-963c-4161-b9c9-cf2b6d8ea555-util\") pod \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\" (UID: \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\") "
Jan 28 18:44:00 crc kubenswrapper[4767]: I0128 18:44:00.536486 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vkmsz\" (UniqueName: \"kubernetes.io/projected/cedc3704-963c-4161-b9c9-cf2b6d8ea555-kube-api-access-vkmsz\") pod \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\" (UID: \"cedc3704-963c-4161-b9c9-cf2b6d8ea555\") "
Jan 28 18:44:00 crc kubenswrapper[4767]: I0128 18:44:00.537328 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cedc3704-963c-4161-b9c9-cf2b6d8ea555-bundle" (OuterVolumeSpecName: "bundle") pod "cedc3704-963c-4161-b9c9-cf2b6d8ea555" (UID: "cedc3704-963c-4161-b9c9-cf2b6d8ea555"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:44:00 crc kubenswrapper[4767]: I0128 18:44:00.542539 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cedc3704-963c-4161-b9c9-cf2b6d8ea555-kube-api-access-vkmsz" (OuterVolumeSpecName: "kube-api-access-vkmsz") pod "cedc3704-963c-4161-b9c9-cf2b6d8ea555" (UID: "cedc3704-963c-4161-b9c9-cf2b6d8ea555"). InnerVolumeSpecName "kube-api-access-vkmsz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:44:00 crc kubenswrapper[4767]: I0128 18:44:00.551657 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cedc3704-963c-4161-b9c9-cf2b6d8ea555-util" (OuterVolumeSpecName: "util") pod "cedc3704-963c-4161-b9c9-cf2b6d8ea555" (UID: "cedc3704-963c-4161-b9c9-cf2b6d8ea555"). InnerVolumeSpecName "util".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:44:00 crc kubenswrapper[4767]: I0128 18:44:00.664167 4767 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/cedc3704-963c-4161-b9c9-cf2b6d8ea555-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:00 crc kubenswrapper[4767]: I0128 18:44:00.664286 4767 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/cedc3704-963c-4161-b9c9-cf2b6d8ea555-util\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:00 crc kubenswrapper[4767]: I0128 18:44:00.664302 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vkmsz\" (UniqueName: \"kubernetes.io/projected/cedc3704-963c-4161-b9c9-cf2b6d8ea555-kube-api-access-vkmsz\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:01 crc kubenswrapper[4767]: I0128 18:44:01.123359 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" event={"ID":"cedc3704-963c-4161-b9c9-cf2b6d8ea555","Type":"ContainerDied","Data":"330c57c7efcd20b536007c3c14470e165c853e980b1d91cbe99df019a9deaa70"} Jan 28 18:44:01 crc kubenswrapper[4767]: I0128 18:44:01.123710 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="330c57c7efcd20b536007c3c14470e165c853e980b1d91cbe99df019a9deaa70" Jan 28 18:44:01 crc kubenswrapper[4767]: I0128 18:44:01.123391 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh" Jan 28 18:44:01 crc kubenswrapper[4767]: I0128 18:44:01.126618 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmms8" event={"ID":"79b4937a-86f4-41da-83c6-8c8c9164552c","Type":"ContainerStarted","Data":"c27da083a8cd317df0c651347665c6021ab16d88b843741858b3a3d7f202fa67"} Jan 28 18:44:01 crc kubenswrapper[4767]: I0128 18:44:01.419999 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wmms8" podStartSLOduration=2.927199172 podStartE2EDuration="5.419977691s" podCreationTimestamp="2026-01-28 18:43:56 +0000 UTC" firstStartedPulling="2026-01-28 18:43:58.093720383 +0000 UTC m=+844.057903257" lastFinishedPulling="2026-01-28 18:44:00.586498892 +0000 UTC m=+846.550681776" observedRunningTime="2026-01-28 18:44:01.143938731 +0000 UTC m=+847.108121605" watchObservedRunningTime="2026-01-28 18:44:01.419977691 +0000 UTC m=+847.384160575" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.320039 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-lkld9"] Jan 28 18:44:02 crc kubenswrapper[4767]: E0128 18:44:02.320276 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cedc3704-963c-4161-b9c9-cf2b6d8ea555" containerName="extract" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.320294 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cedc3704-963c-4161-b9c9-cf2b6d8ea555" containerName="extract" Jan 28 18:44:02 crc kubenswrapper[4767]: E0128 18:44:02.320311 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cedc3704-963c-4161-b9c9-cf2b6d8ea555" containerName="pull" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.320318 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cedc3704-963c-4161-b9c9-cf2b6d8ea555" containerName="pull" Jan 28 18:44:02 crc 
kubenswrapper[4767]: E0128 18:44:02.320333 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cedc3704-963c-4161-b9c9-cf2b6d8ea555" containerName="util" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.320340 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cedc3704-963c-4161-b9c9-cf2b6d8ea555" containerName="util" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.320447 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="cedc3704-963c-4161-b9c9-cf2b6d8ea555" containerName="extract" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.320880 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-lkld9" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.323885 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.323885 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.324160 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-zdzrt" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.336314 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-lkld9"] Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.386028 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56wt6\" (UniqueName: \"kubernetes.io/projected/a18398dd-746f-4915-8989-211b52555416-kube-api-access-56wt6\") pod \"nmstate-operator-646758c888-lkld9\" (UID: \"a18398dd-746f-4915-8989-211b52555416\") " pod="openshift-nmstate/nmstate-operator-646758c888-lkld9" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.487455 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56wt6\" (UniqueName: \"kubernetes.io/projected/a18398dd-746f-4915-8989-211b52555416-kube-api-access-56wt6\") pod \"nmstate-operator-646758c888-lkld9\" (UID: \"a18398dd-746f-4915-8989-211b52555416\") " pod="openshift-nmstate/nmstate-operator-646758c888-lkld9" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.507468 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56wt6\" (UniqueName: \"kubernetes.io/projected/a18398dd-746f-4915-8989-211b52555416-kube-api-access-56wt6\") pod \"nmstate-operator-646758c888-lkld9\" (UID: \"a18398dd-746f-4915-8989-211b52555416\") " pod="openshift-nmstate/nmstate-operator-646758c888-lkld9" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.636313 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-lkld9" Jan 28 18:44:02 crc kubenswrapper[4767]: I0128 18:44:02.858087 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-lkld9"] Jan 28 18:44:02 crc kubenswrapper[4767]: W0128 18:44:02.882830 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda18398dd_746f_4915_8989_211b52555416.slice/crio-c5e26e5c38b5bb51207c6a21130e768979e35a17f8372ac37297493581a393cb WatchSource:0}: Error finding container c5e26e5c38b5bb51207c6a21130e768979e35a17f8372ac37297493581a393cb: Status 404 returned error can't find the container with id c5e26e5c38b5bb51207c6a21130e768979e35a17f8372ac37297493581a393cb Jan 28 18:44:03 crc kubenswrapper[4767]: I0128 18:44:03.142326 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-lkld9" event={"ID":"a18398dd-746f-4915-8989-211b52555416","Type":"ContainerStarted","Data":"c5e26e5c38b5bb51207c6a21130e768979e35a17f8372ac37297493581a393cb"} Jan 28 18:44:05 crc kubenswrapper[4767]: I0128 18:44:05.165373 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-lkld9" event={"ID":"a18398dd-746f-4915-8989-211b52555416","Type":"ContainerStarted","Data":"2afc5bbc00f264627d0df793f99c050406c33b231568122829d053223e13a856"} Jan 28 18:44:05 crc kubenswrapper[4767]: I0128 18:44:05.186512 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-lkld9" podStartSLOduration=1.100652481 podStartE2EDuration="3.186494337s" podCreationTimestamp="2026-01-28 18:44:02 +0000 UTC" firstStartedPulling="2026-01-28 18:44:02.886750898 +0000 UTC m=+848.850933772" lastFinishedPulling="2026-01-28 18:44:04.972592754 +0000 UTC m=+850.936775628" observedRunningTime="2026-01-28 18:44:05.183171183 +0000 UTC m=+851.147354067" watchObservedRunningTime="2026-01-28 18:44:05.186494337 +0000 UTC m=+851.150677211" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.230520 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-n4gj6"] Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.231386 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-n4gj6" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.233701 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-z4qf2" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.241181 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnqp5\" (UniqueName: \"kubernetes.io/projected/ed568f05-84f3-43fb-b4eb-2adaef551020-kube-api-access-hnqp5\") pod \"nmstate-metrics-54757c584b-n4gj6\" (UID: \"ed568f05-84f3-43fb-b4eb-2adaef551020\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-n4gj6" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.242787 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv"] Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.243474 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.245434 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.267469 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv"] Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.270776 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-qhr2g"] Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.271576 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.292625 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-n4gj6"] Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.342734 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnqp5\" (UniqueName: \"kubernetes.io/projected/ed568f05-84f3-43fb-b4eb-2adaef551020-kube-api-access-hnqp5\") pod \"nmstate-metrics-54757c584b-n4gj6\" (UID: \"ed568f05-84f3-43fb-b4eb-2adaef551020\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-n4gj6" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.342790 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zpctv\" (UniqueName: \"kubernetes.io/projected/bef9ca58-f283-48a1-b354-d2c3f061ced9-kube-api-access-zpctv\") pod \"nmstate-handler-qhr2g\" (UID: \"bef9ca58-f283-48a1-b354-d2c3f061ced9\") " pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.342824 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/6f3dc9ef-68cf-4148-a915-fd3d30177771-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-mdhlv\" (UID: \"6f3dc9ef-68cf-4148-a915-fd3d30177771\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.342848 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/bef9ca58-f283-48a1-b354-d2c3f061ced9-dbus-socket\") pod \"nmstate-handler-qhr2g\" (UID: \"bef9ca58-f283-48a1-b354-d2c3f061ced9\") " pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.342865 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/bef9ca58-f283-48a1-b354-d2c3f061ced9-ovs-socket\") pod \"nmstate-handler-qhr2g\" (UID: \"bef9ca58-f283-48a1-b354-d2c3f061ced9\") " pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.342972 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/bef9ca58-f283-48a1-b354-d2c3f061ced9-nmstate-lock\") pod \"nmstate-handler-qhr2g\" (UID: \"bef9ca58-f283-48a1-b354-d2c3f061ced9\") " pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.343000 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"kube-api-access-5t2sk\" (UniqueName: \"kubernetes.io/projected/6f3dc9ef-68cf-4148-a915-fd3d30177771-kube-api-access-5t2sk\") pod \"nmstate-webhook-8474b5b9d8-mdhlv\" (UID: \"6f3dc9ef-68cf-4148-a915-fd3d30177771\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.366721 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnqp5\" (UniqueName: \"kubernetes.io/projected/ed568f05-84f3-43fb-b4eb-2adaef551020-kube-api-access-hnqp5\") pod \"nmstate-metrics-54757c584b-n4gj6\" (UID: \"ed568f05-84f3-43fb-b4eb-2adaef551020\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-n4gj6" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.399533 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd"] Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.400505 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.401946 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-q89b7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.406027 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.406104 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.411310 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd"] Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.444886 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/6f3dc9ef-68cf-4148-a915-fd3d30177771-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-mdhlv\" (UID: \"6f3dc9ef-68cf-4148-a915-fd3d30177771\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.444979 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/bef9ca58-f283-48a1-b354-d2c3f061ced9-dbus-socket\") pod \"nmstate-handler-qhr2g\" (UID: \"bef9ca58-f283-48a1-b354-d2c3f061ced9\") " pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.445005 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/bef9ca58-f283-48a1-b354-d2c3f061ced9-ovs-socket\") pod \"nmstate-handler-qhr2g\" (UID: \"bef9ca58-f283-48a1-b354-d2c3f061ced9\") " pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.445032 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/bef9ca58-f283-48a1-b354-d2c3f061ced9-nmstate-lock\") pod \"nmstate-handler-qhr2g\" (UID: \"bef9ca58-f283-48a1-b354-d2c3f061ced9\") " pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.445065 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/eb6b00a4-771d-49c9-8220-6dcbf9e4a742-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-465vd\" (UID: \"eb6b00a4-771d-49c9-8220-6dcbf9e4a742\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.445093 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5t2sk\" (UniqueName: \"kubernetes.io/projected/6f3dc9ef-68cf-4148-a915-fd3d30177771-kube-api-access-5t2sk\") pod \"nmstate-webhook-8474b5b9d8-mdhlv\" (UID: \"6f3dc9ef-68cf-4148-a915-fd3d30177771\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.445134 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmggk\" (UniqueName: \"kubernetes.io/projected/eb6b00a4-771d-49c9-8220-6dcbf9e4a742-kube-api-access-vmggk\") pod \"nmstate-console-plugin-7754f76f8b-465vd\" (UID: \"eb6b00a4-771d-49c9-8220-6dcbf9e4a742\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.445165 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/eb6b00a4-771d-49c9-8220-6dcbf9e4a742-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-465vd\" (UID: \"eb6b00a4-771d-49c9-8220-6dcbf9e4a742\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.445188 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zpctv\" (UniqueName: \"kubernetes.io/projected/bef9ca58-f283-48a1-b354-d2c3f061ced9-kube-api-access-zpctv\") pod \"nmstate-handler-qhr2g\" (UID: \"bef9ca58-f283-48a1-b354-d2c3f061ced9\") " pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.445193 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/bef9ca58-f283-48a1-b354-d2c3f061ced9-ovs-socket\") pod \"nmstate-handler-qhr2g\" (UID: \"bef9ca58-f283-48a1-b354-d2c3f061ced9\") " pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.445224 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/bef9ca58-f283-48a1-b354-d2c3f061ced9-nmstate-lock\") pod \"nmstate-handler-qhr2g\" (UID: \"bef9ca58-f283-48a1-b354-d2c3f061ced9\") " pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.445418 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/bef9ca58-f283-48a1-b354-d2c3f061ced9-dbus-socket\") pod \"nmstate-handler-qhr2g\" (UID: \"bef9ca58-f283-48a1-b354-d2c3f061ced9\") " pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: E0128 18:44:06.445520 4767 secret.go:188] Couldn't get secret openshift-nmstate/openshift-nmstate-webhook: secret "openshift-nmstate-webhook" not found Jan 28 18:44:06 crc kubenswrapper[4767]: E0128 18:44:06.445612 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6f3dc9ef-68cf-4148-a915-fd3d30177771-tls-key-pair podName:6f3dc9ef-68cf-4148-a915-fd3d30177771 nodeName:}" failed. 
No retries permitted until 2026-01-28 18:44:06.945584235 +0000 UTC m=+852.909767109 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "tls-key-pair" (UniqueName: "kubernetes.io/secret/6f3dc9ef-68cf-4148-a915-fd3d30177771-tls-key-pair") pod "nmstate-webhook-8474b5b9d8-mdhlv" (UID: "6f3dc9ef-68cf-4148-a915-fd3d30177771") : secret "openshift-nmstate-webhook" not found Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.469249 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5t2sk\" (UniqueName: \"kubernetes.io/projected/6f3dc9ef-68cf-4148-a915-fd3d30177771-kube-api-access-5t2sk\") pod \"nmstate-webhook-8474b5b9d8-mdhlv\" (UID: \"6f3dc9ef-68cf-4148-a915-fd3d30177771\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.475727 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zpctv\" (UniqueName: \"kubernetes.io/projected/bef9ca58-f283-48a1-b354-d2c3f061ced9-kube-api-access-zpctv\") pod \"nmstate-handler-qhr2g\" (UID: \"bef9ca58-f283-48a1-b354-d2c3f061ced9\") " pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.546004 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/eb6b00a4-771d-49c9-8220-6dcbf9e4a742-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-465vd\" (UID: \"eb6b00a4-771d-49c9-8220-6dcbf9e4a742\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.546078 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmggk\" (UniqueName: \"kubernetes.io/projected/eb6b00a4-771d-49c9-8220-6dcbf9e4a742-kube-api-access-vmggk\") pod \"nmstate-console-plugin-7754f76f8b-465vd\" (UID: \"eb6b00a4-771d-49c9-8220-6dcbf9e4a742\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.546105 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/eb6b00a4-771d-49c9-8220-6dcbf9e4a742-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-465vd\" (UID: \"eb6b00a4-771d-49c9-8220-6dcbf9e4a742\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" Jan 28 18:44:06 crc kubenswrapper[4767]: E0128 18:44:06.546136 4767 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Jan 28 18:44:06 crc kubenswrapper[4767]: E0128 18:44:06.546229 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/eb6b00a4-771d-49c9-8220-6dcbf9e4a742-plugin-serving-cert podName:eb6b00a4-771d-49c9-8220-6dcbf9e4a742 nodeName:}" failed. No retries permitted until 2026-01-28 18:44:07.046191348 +0000 UTC m=+853.010374222 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/eb6b00a4-771d-49c9-8220-6dcbf9e4a742-plugin-serving-cert") pod "nmstate-console-plugin-7754f76f8b-465vd" (UID: "eb6b00a4-771d-49c9-8220-6dcbf9e4a742") : secret "plugin-serving-cert" not found Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.546982 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/eb6b00a4-771d-49c9-8220-6dcbf9e4a742-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-465vd\" (UID: \"eb6b00a4-771d-49c9-8220-6dcbf9e4a742\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.551416 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-n4gj6" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.570183 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmggk\" (UniqueName: \"kubernetes.io/projected/eb6b00a4-771d-49c9-8220-6dcbf9e4a742-kube-api-access-vmggk\") pod \"nmstate-console-plugin-7754f76f8b-465vd\" (UID: \"eb6b00a4-771d-49c9-8220-6dcbf9e4a742\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.597911 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:06 crc kubenswrapper[4767]: W0128 18:44:06.617648 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbef9ca58_f283_48a1_b354_d2c3f061ced9.slice/crio-7c64866f403ff1096def2783fde321aabd9b0e191581d653e73611042ee9a4ed WatchSource:0}: Error finding container 7c64866f403ff1096def2783fde321aabd9b0e191581d653e73611042ee9a4ed: Status 404 returned error can't find the container with id 7c64866f403ff1096def2783fde321aabd9b0e191581d653e73611042ee9a4ed Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.688812 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-54b8c44687-wkzt7"] Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.689842 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.714101 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-54b8c44687-wkzt7"] Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.849018 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/931af415-b5c7-4a68-9b1f-473599a8d95a-service-ca\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.849089 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/931af415-b5c7-4a68-9b1f-473599a8d95a-trusted-ca-bundle\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.849114 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/931af415-b5c7-4a68-9b1f-473599a8d95a-console-oauth-config\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.849141 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/931af415-b5c7-4a68-9b1f-473599a8d95a-console-serving-cert\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.849185 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/931af415-b5c7-4a68-9b1f-473599a8d95a-console-config\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.849245 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/931af415-b5c7-4a68-9b1f-473599a8d95a-oauth-serving-cert\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.849269 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzs8s\" (UniqueName: \"kubernetes.io/projected/931af415-b5c7-4a68-9b1f-473599a8d95a-kube-api-access-gzs8s\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.863136 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-n4gj6"] Jan 28 18:44:06 crc kubenswrapper[4767]: W0128 18:44:06.870515 4767 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded568f05_84f3_43fb_b4eb_2adaef551020.slice/crio-e7c8e49edbe2f2f9984faa01c0dc8c7ed7878bb4cd961a46dd686adc660ab8fd WatchSource:0}: Error finding container e7c8e49edbe2f2f9984faa01c0dc8c7ed7878bb4cd961a46dd686adc660ab8fd: Status 404 returned error can't find the container with id e7c8e49edbe2f2f9984faa01c0dc8c7ed7878bb4cd961a46dd686adc660ab8fd Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.950812 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/931af415-b5c7-4a68-9b1f-473599a8d95a-console-config\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.950905 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/931af415-b5c7-4a68-9b1f-473599a8d95a-oauth-serving-cert\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.950951 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzs8s\" (UniqueName: \"kubernetes.io/projected/931af415-b5c7-4a68-9b1f-473599a8d95a-kube-api-access-gzs8s\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.951118 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/931af415-b5c7-4a68-9b1f-473599a8d95a-service-ca\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.951162 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/931af415-b5c7-4a68-9b1f-473599a8d95a-trusted-ca-bundle\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.951177 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/931af415-b5c7-4a68-9b1f-473599a8d95a-console-oauth-config\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.951193 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/931af415-b5c7-4a68-9b1f-473599a8d95a-console-serving-cert\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.951235 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/6f3dc9ef-68cf-4148-a915-fd3d30177771-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-mdhlv\" (UID: \"6f3dc9ef-68cf-4148-a915-fd3d30177771\") " 
pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.952349 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/931af415-b5c7-4a68-9b1f-473599a8d95a-oauth-serving-cert\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.952511 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/931af415-b5c7-4a68-9b1f-473599a8d95a-console-config\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.953294 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/931af415-b5c7-4a68-9b1f-473599a8d95a-service-ca\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.954624 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/931af415-b5c7-4a68-9b1f-473599a8d95a-trusted-ca-bundle\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.961304 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/931af415-b5c7-4a68-9b1f-473599a8d95a-console-oauth-config\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.961478 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/6f3dc9ef-68cf-4148-a915-fd3d30177771-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-mdhlv\" (UID: \"6f3dc9ef-68cf-4148-a915-fd3d30177771\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.961529 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/931af415-b5c7-4a68-9b1f-473599a8d95a-console-serving-cert\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:06 crc kubenswrapper[4767]: I0128 18:44:06.976042 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzs8s\" (UniqueName: \"kubernetes.io/projected/931af415-b5c7-4a68-9b1f-473599a8d95a-kube-api-access-gzs8s\") pod \"console-54b8c44687-wkzt7\" (UID: \"931af415-b5c7-4a68-9b1f-473599a8d95a\") " pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.032282 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.052108 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/eb6b00a4-771d-49c9-8220-6dcbf9e4a742-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-465vd\" (UID: \"eb6b00a4-771d-49c9-8220-6dcbf9e4a742\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.056641 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/eb6b00a4-771d-49c9-8220-6dcbf9e4a742-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-465vd\" (UID: \"eb6b00a4-771d-49c9-8220-6dcbf9e4a742\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.122307 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wmms8" Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.122383 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wmms8" Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.170951 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.171350 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wmms8" Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.178911 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-n4gj6" event={"ID":"ed568f05-84f3-43fb-b4eb-2adaef551020","Type":"ContainerStarted","Data":"e7c8e49edbe2f2f9984faa01c0dc8c7ed7878bb4cd961a46dd686adc660ab8fd"} Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.180286 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-qhr2g" event={"ID":"bef9ca58-f283-48a1-b354-d2c3f061ced9","Type":"ContainerStarted","Data":"7c64866f403ff1096def2783fde321aabd9b0e191581d653e73611042ee9a4ed"} Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.234654 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wmms8" Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.321914 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.344457 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-54b8c44687-wkzt7"] Jan 28 18:44:07 crc kubenswrapper[4767]: W0128 18:44:07.355510 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod931af415_b5c7_4a68_9b1f_473599a8d95a.slice/crio-1d47c8af98c5fd57450e07254fe24809ac89efbd5705c068ca66bf91ef98f146 WatchSource:0}: Error finding container 1d47c8af98c5fd57450e07254fe24809ac89efbd5705c068ca66bf91ef98f146: Status 404 returned error can't find the container with id 1d47c8af98c5fd57450e07254fe24809ac89efbd5705c068ca66bf91ef98f146 Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.400305 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv"] Jan 28 18:44:07 crc kubenswrapper[4767]: I0128 18:44:07.596807 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd"] Jan 28 18:44:07 crc kubenswrapper[4767]: W0128 18:44:07.615358 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb6b00a4_771d_49c9_8220_6dcbf9e4a742.slice/crio-b56319ee3da5dd273fc93415cc073f0e82aa22654ab243da0f956d9d3452857e WatchSource:0}: Error finding container b56319ee3da5dd273fc93415cc073f0e82aa22654ab243da0f956d9d3452857e: Status 404 returned error can't find the container with id b56319ee3da5dd273fc93415cc073f0e82aa22654ab243da0f956d9d3452857e Jan 28 18:44:08 crc kubenswrapper[4767]: I0128 18:44:08.188771 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" event={"ID":"6f3dc9ef-68cf-4148-a915-fd3d30177771","Type":"ContainerStarted","Data":"32d1b595b67f71969d80e9b6e2697a762a69a237a28e9e2a8586ceb28da590cb"} Jan 28 18:44:08 crc kubenswrapper[4767]: I0128 18:44:08.191428 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-54b8c44687-wkzt7" event={"ID":"931af415-b5c7-4a68-9b1f-473599a8d95a","Type":"ContainerStarted","Data":"25fe8a3e64f44a4688865eee7f382658959dcb264192112f140ae88077a6a6da"} Jan 28 18:44:08 crc kubenswrapper[4767]: I0128 18:44:08.191465 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-54b8c44687-wkzt7" event={"ID":"931af415-b5c7-4a68-9b1f-473599a8d95a","Type":"ContainerStarted","Data":"1d47c8af98c5fd57450e07254fe24809ac89efbd5705c068ca66bf91ef98f146"} Jan 28 18:44:08 crc kubenswrapper[4767]: I0128 18:44:08.192735 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" event={"ID":"eb6b00a4-771d-49c9-8220-6dcbf9e4a742","Type":"ContainerStarted","Data":"b56319ee3da5dd273fc93415cc073f0e82aa22654ab243da0f956d9d3452857e"} Jan 28 18:44:08 crc kubenswrapper[4767]: I0128 18:44:08.207867 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-54b8c44687-wkzt7" podStartSLOduration=2.207848952 podStartE2EDuration="2.207848952s" podCreationTimestamp="2026-01-28 18:44:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:44:08.205981923 +0000 UTC m=+854.170164807" watchObservedRunningTime="2026-01-28 18:44:08.207848952 +0000 UTC 
m=+854.172031816" Jan 28 18:44:09 crc kubenswrapper[4767]: I0128 18:44:09.601341 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wmms8"] Jan 28 18:44:09 crc kubenswrapper[4767]: I0128 18:44:09.602261 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wmms8" podUID="79b4937a-86f4-41da-83c6-8c8c9164552c" containerName="registry-server" containerID="cri-o://c27da083a8cd317df0c651347665c6021ab16d88b843741858b3a3d7f202fa67" gracePeriod=2 Jan 28 18:44:11 crc kubenswrapper[4767]: I0128 18:44:11.214548 4767 generic.go:334] "Generic (PLEG): container finished" podID="79b4937a-86f4-41da-83c6-8c8c9164552c" containerID="c27da083a8cd317df0c651347665c6021ab16d88b843741858b3a3d7f202fa67" exitCode=0 Jan 28 18:44:11 crc kubenswrapper[4767]: I0128 18:44:11.214588 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmms8" event={"ID":"79b4937a-86f4-41da-83c6-8c8c9164552c","Type":"ContainerDied","Data":"c27da083a8cd317df0c651347665c6021ab16d88b843741858b3a3d7f202fa67"} Jan 28 18:44:11 crc kubenswrapper[4767]: I0128 18:44:11.816953 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wmms8" Jan 28 18:44:11 crc kubenswrapper[4767]: I0128 18:44:11.923847 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j2k7n\" (UniqueName: \"kubernetes.io/projected/79b4937a-86f4-41da-83c6-8c8c9164552c-kube-api-access-j2k7n\") pod \"79b4937a-86f4-41da-83c6-8c8c9164552c\" (UID: \"79b4937a-86f4-41da-83c6-8c8c9164552c\") " Jan 28 18:44:11 crc kubenswrapper[4767]: I0128 18:44:11.923927 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79b4937a-86f4-41da-83c6-8c8c9164552c-catalog-content\") pod \"79b4937a-86f4-41da-83c6-8c8c9164552c\" (UID: \"79b4937a-86f4-41da-83c6-8c8c9164552c\") " Jan 28 18:44:11 crc kubenswrapper[4767]: I0128 18:44:11.923951 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79b4937a-86f4-41da-83c6-8c8c9164552c-utilities\") pod \"79b4937a-86f4-41da-83c6-8c8c9164552c\" (UID: \"79b4937a-86f4-41da-83c6-8c8c9164552c\") " Jan 28 18:44:11 crc kubenswrapper[4767]: I0128 18:44:11.925043 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79b4937a-86f4-41da-83c6-8c8c9164552c-utilities" (OuterVolumeSpecName: "utilities") pod "79b4937a-86f4-41da-83c6-8c8c9164552c" (UID: "79b4937a-86f4-41da-83c6-8c8c9164552c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:44:11 crc kubenswrapper[4767]: I0128 18:44:11.930662 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79b4937a-86f4-41da-83c6-8c8c9164552c-kube-api-access-j2k7n" (OuterVolumeSpecName: "kube-api-access-j2k7n") pod "79b4937a-86f4-41da-83c6-8c8c9164552c" (UID: "79b4937a-86f4-41da-83c6-8c8c9164552c"). InnerVolumeSpecName "kube-api-access-j2k7n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.026067 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j2k7n\" (UniqueName: \"kubernetes.io/projected/79b4937a-86f4-41da-83c6-8c8c9164552c-kube-api-access-j2k7n\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.026387 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79b4937a-86f4-41da-83c6-8c8c9164552c-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.062799 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79b4937a-86f4-41da-83c6-8c8c9164552c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "79b4937a-86f4-41da-83c6-8c8c9164552c" (UID: "79b4937a-86f4-41da-83c6-8c8c9164552c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.127223 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79b4937a-86f4-41da-83c6-8c8c9164552c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.228417 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wmms8" event={"ID":"79b4937a-86f4-41da-83c6-8c8c9164552c","Type":"ContainerDied","Data":"cc5cc9ed6d805b222cf97dde3fd52232d33e862eecd18b97a970096354c43056"} Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.228475 4767 scope.go:117] "RemoveContainer" containerID="c27da083a8cd317df0c651347665c6021ab16d88b843741858b3a3d7f202fa67" Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.228607 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wmms8" Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.245685 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-n4gj6" event={"ID":"ed568f05-84f3-43fb-b4eb-2adaef551020","Type":"ContainerStarted","Data":"36ba1b4b9f1eecf95b7fe5079015710f092a09317b059517b9cb6467ad036ce6"} Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.248075 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" event={"ID":"6f3dc9ef-68cf-4148-a915-fd3d30177771","Type":"ContainerStarted","Data":"6150c573d5e02619d3a86b1422904c75107e9bc423d834ee93502ef766d4d2f8"} Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.248314 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.263276 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wmms8"] Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.268335 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wmms8"] Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.268606 4767 scope.go:117] "RemoveContainer" containerID="e92e30f5da756a9536e7ae3611b6eb5add59db197cbe3b75d4e21cb42359f2e7" Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.289286 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" podStartSLOduration=1.662922515 podStartE2EDuration="6.289268496s" podCreationTimestamp="2026-01-28 18:44:06 +0000 UTC" firstStartedPulling="2026-01-28 18:44:07.414138308 +0000 UTC m=+853.378321182" lastFinishedPulling="2026-01-28 18:44:12.040484289 +0000 UTC m=+858.004667163" observedRunningTime="2026-01-28 18:44:12.284899589 +0000 UTC m=+858.249082453" watchObservedRunningTime="2026-01-28 18:44:12.289268496 +0000 UTC m=+858.253451370" Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.297572 4767 scope.go:117] "RemoveContainer" containerID="1c945cca14604c4e63aa25a19a6735a29b2a5a9cfe224d8b5ad2458b369fc682" Jan 28 18:44:12 crc kubenswrapper[4767]: I0128 18:44:12.804793 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79b4937a-86f4-41da-83c6-8c8c9164552c" path="/var/lib/kubelet/pods/79b4937a-86f4-41da-83c6-8c8c9164552c/volumes" Jan 28 18:44:13 crc kubenswrapper[4767]: I0128 18:44:13.261647 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" event={"ID":"eb6b00a4-771d-49c9-8220-6dcbf9e4a742","Type":"ContainerStarted","Data":"d89104ad7a6049981e0d95b6614e6b64a07f013c5f0502a9ba5b524889d025a5"} Jan 28 18:44:13 crc kubenswrapper[4767]: I0128 18:44:13.263451 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-qhr2g" event={"ID":"bef9ca58-f283-48a1-b354-d2c3f061ced9","Type":"ContainerStarted","Data":"e8d3f107a80c99f5ee031e8f85acf7eba531fa4b22782e2553e5ed9278723118"} Jan 28 18:44:13 crc kubenswrapper[4767]: I0128 18:44:13.283407 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-465vd" podStartSLOduration=1.901640306 podStartE2EDuration="7.2833792s" podCreationTimestamp="2026-01-28 18:44:06 +0000 UTC" firstStartedPulling="2026-01-28 18:44:07.61938662 +0000 UTC m=+853.583569494" 
lastFinishedPulling="2026-01-28 18:44:13.001125504 +0000 UTC m=+858.965308388" observedRunningTime="2026-01-28 18:44:13.278742764 +0000 UTC m=+859.242925648" watchObservedRunningTime="2026-01-28 18:44:13.2833792 +0000 UTC m=+859.247562074" Jan 28 18:44:13 crc kubenswrapper[4767]: I0128 18:44:13.303875 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-qhr2g" podStartSLOduration=1.893078009 podStartE2EDuration="7.303859262s" podCreationTimestamp="2026-01-28 18:44:06 +0000 UTC" firstStartedPulling="2026-01-28 18:44:06.619924809 +0000 UTC m=+852.584107683" lastFinishedPulling="2026-01-28 18:44:12.030706062 +0000 UTC m=+857.994888936" observedRunningTime="2026-01-28 18:44:13.299265377 +0000 UTC m=+859.263448261" watchObservedRunningTime="2026-01-28 18:44:13.303859262 +0000 UTC m=+859.268042136" Jan 28 18:44:14 crc kubenswrapper[4767]: I0128 18:44:14.270696 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:15 crc kubenswrapper[4767]: I0128 18:44:15.277505 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-n4gj6" event={"ID":"ed568f05-84f3-43fb-b4eb-2adaef551020","Type":"ContainerStarted","Data":"fa991d3d386074d077f9ec2fa2c2801df3e9d26430fd8dff689096f929a292e3"} Jan 28 18:44:15 crc kubenswrapper[4767]: I0128 18:44:15.302916 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-n4gj6" podStartSLOduration=1.763735365 podStartE2EDuration="9.302894958s" podCreationTimestamp="2026-01-28 18:44:06 +0000 UTC" firstStartedPulling="2026-01-28 18:44:06.87428392 +0000 UTC m=+852.838466794" lastFinishedPulling="2026-01-28 18:44:14.413443513 +0000 UTC m=+860.377626387" observedRunningTime="2026-01-28 18:44:15.293854984 +0000 UTC m=+861.258037878" watchObservedRunningTime="2026-01-28 18:44:15.302894958 +0000 UTC m=+861.267077832" Jan 28 18:44:17 crc kubenswrapper[4767]: I0128 18:44:17.033293 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:17 crc kubenswrapper[4767]: I0128 18:44:17.033672 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:17 crc kubenswrapper[4767]: I0128 18:44:17.040031 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:17 crc kubenswrapper[4767]: I0128 18:44:17.292411 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-54b8c44687-wkzt7" Jan 28 18:44:17 crc kubenswrapper[4767]: I0128 18:44:17.342194 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-qfdzz"] Jan 28 18:44:21 crc kubenswrapper[4767]: I0128 18:44:21.622690 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-qhr2g" Jan 28 18:44:27 crc kubenswrapper[4767]: I0128 18:44:27.176746 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-mdhlv" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.759913 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28"] Jan 28 18:44:38 crc kubenswrapper[4767]: E0128 
18:44:38.760695 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79b4937a-86f4-41da-83c6-8c8c9164552c" containerName="extract-utilities" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.760711 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b4937a-86f4-41da-83c6-8c8c9164552c" containerName="extract-utilities" Jan 28 18:44:38 crc kubenswrapper[4767]: E0128 18:44:38.760722 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79b4937a-86f4-41da-83c6-8c8c9164552c" containerName="extract-content" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.760728 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b4937a-86f4-41da-83c6-8c8c9164552c" containerName="extract-content" Jan 28 18:44:38 crc kubenswrapper[4767]: E0128 18:44:38.760744 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79b4937a-86f4-41da-83c6-8c8c9164552c" containerName="registry-server" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.760750 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="79b4937a-86f4-41da-83c6-8c8c9164552c" containerName="registry-server" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.760857 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="79b4937a-86f4-41da-83c6-8c8c9164552c" containerName="registry-server" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.761572 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.764617 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.775724 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28"] Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.790944 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqhkc\" (UniqueName: \"kubernetes.io/projected/6eecf577-4963-45f9-a5fa-96bfae201c3c-kube-api-access-fqhkc\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28\" (UID: \"6eecf577-4963-45f9-a5fa-96bfae201c3c\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.790992 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6eecf577-4963-45f9-a5fa-96bfae201c3c-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28\" (UID: \"6eecf577-4963-45f9-a5fa-96bfae201c3c\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.791022 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6eecf577-4963-45f9-a5fa-96bfae201c3c-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28\" (UID: \"6eecf577-4963-45f9-a5fa-96bfae201c3c\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.891988 4767 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kube-api-access-fqhkc\" (UniqueName: \"kubernetes.io/projected/6eecf577-4963-45f9-a5fa-96bfae201c3c-kube-api-access-fqhkc\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28\" (UID: \"6eecf577-4963-45f9-a5fa-96bfae201c3c\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.892066 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6eecf577-4963-45f9-a5fa-96bfae201c3c-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28\" (UID: \"6eecf577-4963-45f9-a5fa-96bfae201c3c\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.892097 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6eecf577-4963-45f9-a5fa-96bfae201c3c-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28\" (UID: \"6eecf577-4963-45f9-a5fa-96bfae201c3c\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.892816 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6eecf577-4963-45f9-a5fa-96bfae201c3c-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28\" (UID: \"6eecf577-4963-45f9-a5fa-96bfae201c3c\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.892944 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6eecf577-4963-45f9-a5fa-96bfae201c3c-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28\" (UID: \"6eecf577-4963-45f9-a5fa-96bfae201c3c\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:38 crc kubenswrapper[4767]: I0128 18:44:38.920911 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqhkc\" (UniqueName: \"kubernetes.io/projected/6eecf577-4963-45f9-a5fa-96bfae201c3c-kube-api-access-fqhkc\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28\" (UID: \"6eecf577-4963-45f9-a5fa-96bfae201c3c\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:39 crc kubenswrapper[4767]: I0128 18:44:39.077642 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:39 crc kubenswrapper[4767]: I0128 18:44:39.556239 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28"] Jan 28 18:44:40 crc kubenswrapper[4767]: I0128 18:44:40.447021 4767 generic.go:334] "Generic (PLEG): container finished" podID="6eecf577-4963-45f9-a5fa-96bfae201c3c" containerID="bdd0b46919b5f3a233d7fd5512c5b98cf30b80a86c77498492f4dc675a0a643b" exitCode=0 Jan 28 18:44:40 crc kubenswrapper[4767]: I0128 18:44:40.447067 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" event={"ID":"6eecf577-4963-45f9-a5fa-96bfae201c3c","Type":"ContainerDied","Data":"bdd0b46919b5f3a233d7fd5512c5b98cf30b80a86c77498492f4dc675a0a643b"} Jan 28 18:44:40 crc kubenswrapper[4767]: I0128 18:44:40.447095 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" event={"ID":"6eecf577-4963-45f9-a5fa-96bfae201c3c","Type":"ContainerStarted","Data":"58b608cf24a5241302666b5723366db4d5e700db4e368d62c8615cd62e653187"} Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.391188 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-qfdzz" podUID="1ac82155-1d09-4371-869a-e7edb9c4d5bc" containerName="console" containerID="cri-o://b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74" gracePeriod=15 Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.465455 4767 generic.go:334] "Generic (PLEG): container finished" podID="6eecf577-4963-45f9-a5fa-96bfae201c3c" containerID="230b523bcd3a243a1d2cb6b5d86043adb4f5b2b86ccf75a24ee6d380f566dfd5" exitCode=0 Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.465518 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" event={"ID":"6eecf577-4963-45f9-a5fa-96bfae201c3c","Type":"ContainerDied","Data":"230b523bcd3a243a1d2cb6b5d86043adb4f5b2b86ccf75a24ee6d380f566dfd5"} Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.802127 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-qfdzz_1ac82155-1d09-4371-869a-e7edb9c4d5bc/console/0.log" Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.802706 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.944933 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-config\") pod \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.944994 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-oauth-serving-cert\") pod \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.945054 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-service-ca\") pod \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.945092 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-serving-cert\") pod \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.945162 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-trusted-ca-bundle\") pod \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.945191 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-oauth-config\") pod \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.945248 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hlgw\" (UniqueName: \"kubernetes.io/projected/1ac82155-1d09-4371-869a-e7edb9c4d5bc-kube-api-access-7hlgw\") pod \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\" (UID: \"1ac82155-1d09-4371-869a-e7edb9c4d5bc\") " Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.946303 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-service-ca" (OuterVolumeSpecName: "service-ca") pod "1ac82155-1d09-4371-869a-e7edb9c4d5bc" (UID: "1ac82155-1d09-4371-869a-e7edb9c4d5bc"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.946722 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1ac82155-1d09-4371-869a-e7edb9c4d5bc" (UID: "1ac82155-1d09-4371-869a-e7edb9c4d5bc"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.946889 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "1ac82155-1d09-4371-869a-e7edb9c4d5bc" (UID: "1ac82155-1d09-4371-869a-e7edb9c4d5bc"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.947115 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-config" (OuterVolumeSpecName: "console-config") pod "1ac82155-1d09-4371-869a-e7edb9c4d5bc" (UID: "1ac82155-1d09-4371-869a-e7edb9c4d5bc"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.954169 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "1ac82155-1d09-4371-869a-e7edb9c4d5bc" (UID: "1ac82155-1d09-4371-869a-e7edb9c4d5bc"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.954974 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ac82155-1d09-4371-869a-e7edb9c4d5bc-kube-api-access-7hlgw" (OuterVolumeSpecName: "kube-api-access-7hlgw") pod "1ac82155-1d09-4371-869a-e7edb9c4d5bc" (UID: "1ac82155-1d09-4371-869a-e7edb9c4d5bc"). InnerVolumeSpecName "kube-api-access-7hlgw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:44:42 crc kubenswrapper[4767]: I0128 18:44:42.957295 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "1ac82155-1d09-4371-869a-e7edb9c4d5bc" (UID: "1ac82155-1d09-4371-869a-e7edb9c4d5bc"). InnerVolumeSpecName "console-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.048079 4767 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.048121 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hlgw\" (UniqueName: \"kubernetes.io/projected/1ac82155-1d09-4371-869a-e7edb9c4d5bc-kube-api-access-7hlgw\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.048132 4767 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.048142 4767 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.048151 4767 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-service-ca\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.048160 4767 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/1ac82155-1d09-4371-869a-e7edb9c4d5bc-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.048169 4767 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1ac82155-1d09-4371-869a-e7edb9c4d5bc-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.473773 4767 generic.go:334] "Generic (PLEG): container finished" podID="6eecf577-4963-45f9-a5fa-96bfae201c3c" containerID="a793f287027872a1e320a80d8776d29844c215cad36a89e609be569cd028aa82" exitCode=0 Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.473894 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" event={"ID":"6eecf577-4963-45f9-a5fa-96bfae201c3c","Type":"ContainerDied","Data":"a793f287027872a1e320a80d8776d29844c215cad36a89e609be569cd028aa82"} Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.475686 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-qfdzz_1ac82155-1d09-4371-869a-e7edb9c4d5bc/console/0.log" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.475731 4767 generic.go:334] "Generic (PLEG): container finished" podID="1ac82155-1d09-4371-869a-e7edb9c4d5bc" containerID="b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74" exitCode=2 Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.475758 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qfdzz" event={"ID":"1ac82155-1d09-4371-869a-e7edb9c4d5bc","Type":"ContainerDied","Data":"b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74"} Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.475773 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-qfdzz" 
event={"ID":"1ac82155-1d09-4371-869a-e7edb9c4d5bc","Type":"ContainerDied","Data":"a50f98706b484dedf9b4a0cb1279c7c96f9ba1d38115aee7a317e075bb1a5eeb"} Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.475790 4767 scope.go:117] "RemoveContainer" containerID="b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.475826 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-qfdzz" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.494633 4767 scope.go:117] "RemoveContainer" containerID="b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74" Jan 28 18:44:43 crc kubenswrapper[4767]: E0128 18:44:43.496455 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74\": container with ID starting with b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74 not found: ID does not exist" containerID="b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.496503 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74"} err="failed to get container status \"b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74\": rpc error: code = NotFound desc = could not find container \"b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74\": container with ID starting with b2c097c40211dae43fceca687b56b18d4f213e6a1e6689cdc3ca923bff742c74 not found: ID does not exist" Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.511351 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-qfdzz"] Jan 28 18:44:43 crc kubenswrapper[4767]: I0128 18:44:43.516666 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-qfdzz"] Jan 28 18:44:44 crc kubenswrapper[4767]: I0128 18:44:44.724747 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:44 crc kubenswrapper[4767]: I0128 18:44:44.807792 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ac82155-1d09-4371-869a-e7edb9c4d5bc" path="/var/lib/kubelet/pods/1ac82155-1d09-4371-869a-e7edb9c4d5bc/volumes" Jan 28 18:44:44 crc kubenswrapper[4767]: I0128 18:44:44.872682 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6eecf577-4963-45f9-a5fa-96bfae201c3c-bundle\") pod \"6eecf577-4963-45f9-a5fa-96bfae201c3c\" (UID: \"6eecf577-4963-45f9-a5fa-96bfae201c3c\") " Jan 28 18:44:44 crc kubenswrapper[4767]: I0128 18:44:44.873030 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6eecf577-4963-45f9-a5fa-96bfae201c3c-util\") pod \"6eecf577-4963-45f9-a5fa-96bfae201c3c\" (UID: \"6eecf577-4963-45f9-a5fa-96bfae201c3c\") " Jan 28 18:44:44 crc kubenswrapper[4767]: I0128 18:44:44.873080 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqhkc\" (UniqueName: \"kubernetes.io/projected/6eecf577-4963-45f9-a5fa-96bfae201c3c-kube-api-access-fqhkc\") pod \"6eecf577-4963-45f9-a5fa-96bfae201c3c\" (UID: \"6eecf577-4963-45f9-a5fa-96bfae201c3c\") " Jan 28 18:44:44 crc kubenswrapper[4767]: I0128 18:44:44.874388 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6eecf577-4963-45f9-a5fa-96bfae201c3c-bundle" (OuterVolumeSpecName: "bundle") pod "6eecf577-4963-45f9-a5fa-96bfae201c3c" (UID: "6eecf577-4963-45f9-a5fa-96bfae201c3c"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:44:44 crc kubenswrapper[4767]: I0128 18:44:44.874688 4767 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6eecf577-4963-45f9-a5fa-96bfae201c3c-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:44 crc kubenswrapper[4767]: I0128 18:44:44.877122 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6eecf577-4963-45f9-a5fa-96bfae201c3c-kube-api-access-fqhkc" (OuterVolumeSpecName: "kube-api-access-fqhkc") pod "6eecf577-4963-45f9-a5fa-96bfae201c3c" (UID: "6eecf577-4963-45f9-a5fa-96bfae201c3c"). InnerVolumeSpecName "kube-api-access-fqhkc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:44:44 crc kubenswrapper[4767]: I0128 18:44:44.976427 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqhkc\" (UniqueName: \"kubernetes.io/projected/6eecf577-4963-45f9-a5fa-96bfae201c3c-kube-api-access-fqhkc\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:45 crc kubenswrapper[4767]: I0128 18:44:45.206027 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6eecf577-4963-45f9-a5fa-96bfae201c3c-util" (OuterVolumeSpecName: "util") pod "6eecf577-4963-45f9-a5fa-96bfae201c3c" (UID: "6eecf577-4963-45f9-a5fa-96bfae201c3c"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:44:45 crc kubenswrapper[4767]: I0128 18:44:45.279276 4767 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6eecf577-4963-45f9-a5fa-96bfae201c3c-util\") on node \"crc\" DevicePath \"\"" Jan 28 18:44:45 crc kubenswrapper[4767]: I0128 18:44:45.490420 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" event={"ID":"6eecf577-4963-45f9-a5fa-96bfae201c3c","Type":"ContainerDied","Data":"58b608cf24a5241302666b5723366db4d5e700db4e368d62c8615cd62e653187"} Jan 28 18:44:45 crc kubenswrapper[4767]: I0128 18:44:45.490465 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="58b608cf24a5241302666b5723366db4d5e700db4e368d62c8615cd62e653187" Jan 28 18:44:45 crc kubenswrapper[4767]: I0128 18:44:45.490540 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.008023 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt"] Jan 28 18:44:54 crc kubenswrapper[4767]: E0128 18:44:54.009880 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6eecf577-4963-45f9-a5fa-96bfae201c3c" containerName="util" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.009963 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="6eecf577-4963-45f9-a5fa-96bfae201c3c" containerName="util" Jan 28 18:44:54 crc kubenswrapper[4767]: E0128 18:44:54.010029 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6eecf577-4963-45f9-a5fa-96bfae201c3c" containerName="extract" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.010090 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="6eecf577-4963-45f9-a5fa-96bfae201c3c" containerName="extract" Jan 28 18:44:54 crc kubenswrapper[4767]: E0128 18:44:54.010167 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ac82155-1d09-4371-869a-e7edb9c4d5bc" containerName="console" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.010255 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ac82155-1d09-4371-869a-e7edb9c4d5bc" containerName="console" Jan 28 18:44:54 crc kubenswrapper[4767]: E0128 18:44:54.010340 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6eecf577-4963-45f9-a5fa-96bfae201c3c" containerName="pull" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.010400 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="6eecf577-4963-45f9-a5fa-96bfae201c3c" containerName="pull" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.010588 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ac82155-1d09-4371-869a-e7edb9c4d5bc" containerName="console" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.010657 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="6eecf577-4963-45f9-a5fa-96bfae201c3c" containerName="extract" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.011223 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.015603 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.015767 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-v95h9" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.015854 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.016070 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.018112 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.033501 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt"] Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.201557 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2b81ef34-3602-4a95-adaf-e2168b7c2827-apiservice-cert\") pod \"metallb-operator-controller-manager-6f4744495b-dgmjt\" (UID: \"2b81ef34-3602-4a95-adaf-e2168b7c2827\") " pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.201643 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfql4\" (UniqueName: \"kubernetes.io/projected/2b81ef34-3602-4a95-adaf-e2168b7c2827-kube-api-access-mfql4\") pod \"metallb-operator-controller-manager-6f4744495b-dgmjt\" (UID: \"2b81ef34-3602-4a95-adaf-e2168b7c2827\") " pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.202384 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2b81ef34-3602-4a95-adaf-e2168b7c2827-webhook-cert\") pod \"metallb-operator-controller-manager-6f4744495b-dgmjt\" (UID: \"2b81ef34-3602-4a95-adaf-e2168b7c2827\") " pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.303144 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2b81ef34-3602-4a95-adaf-e2168b7c2827-apiservice-cert\") pod \"metallb-operator-controller-manager-6f4744495b-dgmjt\" (UID: \"2b81ef34-3602-4a95-adaf-e2168b7c2827\") " pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.303215 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2b81ef34-3602-4a95-adaf-e2168b7c2827-webhook-cert\") pod \"metallb-operator-controller-manager-6f4744495b-dgmjt\" (UID: \"2b81ef34-3602-4a95-adaf-e2168b7c2827\") " pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.303235 4767 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfql4\" (UniqueName: \"kubernetes.io/projected/2b81ef34-3602-4a95-adaf-e2168b7c2827-kube-api-access-mfql4\") pod \"metallb-operator-controller-manager-6f4744495b-dgmjt\" (UID: \"2b81ef34-3602-4a95-adaf-e2168b7c2827\") " pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.311356 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2b81ef34-3602-4a95-adaf-e2168b7c2827-webhook-cert\") pod \"metallb-operator-controller-manager-6f4744495b-dgmjt\" (UID: \"2b81ef34-3602-4a95-adaf-e2168b7c2827\") " pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.311390 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2b81ef34-3602-4a95-adaf-e2168b7c2827-apiservice-cert\") pod \"metallb-operator-controller-manager-6f4744495b-dgmjt\" (UID: \"2b81ef34-3602-4a95-adaf-e2168b7c2827\") " pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.331124 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfql4\" (UniqueName: \"kubernetes.io/projected/2b81ef34-3602-4a95-adaf-e2168b7c2827-kube-api-access-mfql4\") pod \"metallb-operator-controller-manager-6f4744495b-dgmjt\" (UID: \"2b81ef34-3602-4a95-adaf-e2168b7c2827\") " pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.464674 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4"] Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.465439 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.467565 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-9wzgt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.473465 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.473472 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.484732 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4"] Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.606335 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a4b668f3-0c0f-43bb-8d16-c460c756226c-webhook-cert\") pod \"metallb-operator-webhook-server-6584c89557-hp6s4\" (UID: \"a4b668f3-0c0f-43bb-8d16-c460c756226c\") " pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.606380 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2mh8\" (UniqueName: \"kubernetes.io/projected/a4b668f3-0c0f-43bb-8d16-c460c756226c-kube-api-access-c2mh8\") pod \"metallb-operator-webhook-server-6584c89557-hp6s4\" (UID: \"a4b668f3-0c0f-43bb-8d16-c460c756226c\") " pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.606403 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a4b668f3-0c0f-43bb-8d16-c460c756226c-apiservice-cert\") pod \"metallb-operator-webhook-server-6584c89557-hp6s4\" (UID: \"a4b668f3-0c0f-43bb-8d16-c460c756226c\") " pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.626433 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.707331 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a4b668f3-0c0f-43bb-8d16-c460c756226c-webhook-cert\") pod \"metallb-operator-webhook-server-6584c89557-hp6s4\" (UID: \"a4b668f3-0c0f-43bb-8d16-c460c756226c\") " pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.707382 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2mh8\" (UniqueName: \"kubernetes.io/projected/a4b668f3-0c0f-43bb-8d16-c460c756226c-kube-api-access-c2mh8\") pod \"metallb-operator-webhook-server-6584c89557-hp6s4\" (UID: \"a4b668f3-0c0f-43bb-8d16-c460c756226c\") " pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.707405 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a4b668f3-0c0f-43bb-8d16-c460c756226c-apiservice-cert\") pod \"metallb-operator-webhook-server-6584c89557-hp6s4\" (UID: \"a4b668f3-0c0f-43bb-8d16-c460c756226c\") " pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.713930 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/a4b668f3-0c0f-43bb-8d16-c460c756226c-webhook-cert\") pod \"metallb-operator-webhook-server-6584c89557-hp6s4\" (UID: \"a4b668f3-0c0f-43bb-8d16-c460c756226c\") " pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.714097 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/a4b668f3-0c0f-43bb-8d16-c460c756226c-apiservice-cert\") pod \"metallb-operator-webhook-server-6584c89557-hp6s4\" (UID: \"a4b668f3-0c0f-43bb-8d16-c460c756226c\") " pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.737136 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2mh8\" (UniqueName: \"kubernetes.io/projected/a4b668f3-0c0f-43bb-8d16-c460c756226c-kube-api-access-c2mh8\") pod \"metallb-operator-webhook-server-6584c89557-hp6s4\" (UID: \"a4b668f3-0c0f-43bb-8d16-c460c756226c\") " pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.784129 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-9wzgt" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.791399 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:44:54 crc kubenswrapper[4767]: I0128 18:44:54.892744 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt"] Jan 28 18:44:54 crc kubenswrapper[4767]: W0128 18:44:54.903624 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b81ef34_3602_4a95_adaf_e2168b7c2827.slice/crio-feedf816c507fc6efa0ecec95f916b46dba6eba3ecb87bcc8c8979c6d512e18a WatchSource:0}: Error finding container feedf816c507fc6efa0ecec95f916b46dba6eba3ecb87bcc8c8979c6d512e18a: Status 404 returned error can't find the container with id feedf816c507fc6efa0ecec95f916b46dba6eba3ecb87bcc8c8979c6d512e18a Jan 28 18:44:55 crc kubenswrapper[4767]: I0128 18:44:55.050221 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4"] Jan 28 18:44:55 crc kubenswrapper[4767]: W0128 18:44:55.055423 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4b668f3_0c0f_43bb_8d16_c460c756226c.slice/crio-b5183e7b85e2ed4a76e4d667949674880377d220e156710ee938767b7ac079b9 WatchSource:0}: Error finding container b5183e7b85e2ed4a76e4d667949674880377d220e156710ee938767b7ac079b9: Status 404 returned error can't find the container with id b5183e7b85e2ed4a76e4d667949674880377d220e156710ee938767b7ac079b9 Jan 28 18:44:55 crc kubenswrapper[4767]: I0128 18:44:55.561548 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" event={"ID":"a4b668f3-0c0f-43bb-8d16-c460c756226c","Type":"ContainerStarted","Data":"b5183e7b85e2ed4a76e4d667949674880377d220e156710ee938767b7ac079b9"} Jan 28 18:44:55 crc kubenswrapper[4767]: I0128 18:44:55.563034 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" event={"ID":"2b81ef34-3602-4a95-adaf-e2168b7c2827","Type":"ContainerStarted","Data":"feedf816c507fc6efa0ecec95f916b46dba6eba3ecb87bcc8c8979c6d512e18a"} Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.143834 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl"] Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.145384 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.147488 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.147718 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.156037 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl"] Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.301168 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcd5n\" (UniqueName: \"kubernetes.io/projected/f42ec511-1853-412d-930e-8908b4ca7619-kube-api-access-fcd5n\") pod \"collect-profiles-29493765-pwhjl\" (UID: \"f42ec511-1853-412d-930e-8908b4ca7619\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.301500 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f42ec511-1853-412d-930e-8908b4ca7619-secret-volume\") pod \"collect-profiles-29493765-pwhjl\" (UID: \"f42ec511-1853-412d-930e-8908b4ca7619\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.301623 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f42ec511-1853-412d-930e-8908b4ca7619-config-volume\") pod \"collect-profiles-29493765-pwhjl\" (UID: \"f42ec511-1853-412d-930e-8908b4ca7619\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.403258 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f42ec511-1853-412d-930e-8908b4ca7619-secret-volume\") pod \"collect-profiles-29493765-pwhjl\" (UID: \"f42ec511-1853-412d-930e-8908b4ca7619\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.403307 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f42ec511-1853-412d-930e-8908b4ca7619-config-volume\") pod \"collect-profiles-29493765-pwhjl\" (UID: \"f42ec511-1853-412d-930e-8908b4ca7619\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.403346 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcd5n\" (UniqueName: \"kubernetes.io/projected/f42ec511-1853-412d-930e-8908b4ca7619-kube-api-access-fcd5n\") pod \"collect-profiles-29493765-pwhjl\" (UID: \"f42ec511-1853-412d-930e-8908b4ca7619\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.404361 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f42ec511-1853-412d-930e-8908b4ca7619-config-volume\") pod 
\"collect-profiles-29493765-pwhjl\" (UID: \"f42ec511-1853-412d-930e-8908b4ca7619\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.409286 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f42ec511-1853-412d-930e-8908b4ca7619-secret-volume\") pod \"collect-profiles-29493765-pwhjl\" (UID: \"f42ec511-1853-412d-930e-8908b4ca7619\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.444630 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcd5n\" (UniqueName: \"kubernetes.io/projected/f42ec511-1853-412d-930e-8908b4ca7619-kube-api-access-fcd5n\") pod \"collect-profiles-29493765-pwhjl\" (UID: \"f42ec511-1853-412d-930e-8908b4ca7619\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.467323 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.610812 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" event={"ID":"a4b668f3-0c0f-43bb-8d16-c460c756226c","Type":"ContainerStarted","Data":"0f63395bfe76c1ef66ba27c8312f4950e5b88c1d177786f248d6e9efa0ee124e"} Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.611288 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.619816 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" event={"ID":"2b81ef34-3602-4a95-adaf-e2168b7c2827","Type":"ContainerStarted","Data":"a0036aaa3ec4bf108f3fc6d3fc782b7af5d75bcb1753618b2bcda60e71ff9278"} Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.620044 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.665044 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" podStartSLOduration=1.772585913 podStartE2EDuration="6.665028124s" podCreationTimestamp="2026-01-28 18:44:54 +0000 UTC" firstStartedPulling="2026-01-28 18:44:55.057016028 +0000 UTC m=+901.021198902" lastFinishedPulling="2026-01-28 18:44:59.949458239 +0000 UTC m=+905.913641113" observedRunningTime="2026-01-28 18:45:00.659844191 +0000 UTC m=+906.624027065" watchObservedRunningTime="2026-01-28 18:45:00.665028124 +0000 UTC m=+906.629210998" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.777195 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" podStartSLOduration=2.738812444 podStartE2EDuration="7.777176568s" podCreationTimestamp="2026-01-28 18:44:53 +0000 UTC" firstStartedPulling="2026-01-28 18:44:54.907186003 +0000 UTC m=+900.871368877" lastFinishedPulling="2026-01-28 18:44:59.945550127 +0000 UTC m=+905.909733001" observedRunningTime="2026-01-28 18:45:00.743693349 +0000 UTC m=+906.707876223" watchObservedRunningTime="2026-01-28 
18:45:00.777176568 +0000 UTC m=+906.741359442" Jan 28 18:45:00 crc kubenswrapper[4767]: I0128 18:45:00.777579 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl"] Jan 28 18:45:01 crc kubenswrapper[4767]: I0128 18:45:01.636769 4767 generic.go:334] "Generic (PLEG): container finished" podID="f42ec511-1853-412d-930e-8908b4ca7619" containerID="7ebd66cd982a4c7988ab09f7878b28346c5a8c818a257bc93d6637397e86f2ac" exitCode=0 Jan 28 18:45:01 crc kubenswrapper[4767]: I0128 18:45:01.636889 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" event={"ID":"f42ec511-1853-412d-930e-8908b4ca7619","Type":"ContainerDied","Data":"7ebd66cd982a4c7988ab09f7878b28346c5a8c818a257bc93d6637397e86f2ac"} Jan 28 18:45:01 crc kubenswrapper[4767]: I0128 18:45:01.637345 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" event={"ID":"f42ec511-1853-412d-930e-8908b4ca7619","Type":"ContainerStarted","Data":"239177c2361b04921d0a8c47d3333b44f202385551fe3f3fb1c86c8e5900ccce"} Jan 28 18:45:02 crc kubenswrapper[4767]: I0128 18:45:02.878163 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:03 crc kubenswrapper[4767]: I0128 18:45:03.055402 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcd5n\" (UniqueName: \"kubernetes.io/projected/f42ec511-1853-412d-930e-8908b4ca7619-kube-api-access-fcd5n\") pod \"f42ec511-1853-412d-930e-8908b4ca7619\" (UID: \"f42ec511-1853-412d-930e-8908b4ca7619\") " Jan 28 18:45:03 crc kubenswrapper[4767]: I0128 18:45:03.055515 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f42ec511-1853-412d-930e-8908b4ca7619-secret-volume\") pod \"f42ec511-1853-412d-930e-8908b4ca7619\" (UID: \"f42ec511-1853-412d-930e-8908b4ca7619\") " Jan 28 18:45:03 crc kubenswrapper[4767]: I0128 18:45:03.055730 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f42ec511-1853-412d-930e-8908b4ca7619-config-volume\") pod \"f42ec511-1853-412d-930e-8908b4ca7619\" (UID: \"f42ec511-1853-412d-930e-8908b4ca7619\") " Jan 28 18:45:03 crc kubenswrapper[4767]: I0128 18:45:03.056344 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f42ec511-1853-412d-930e-8908b4ca7619-config-volume" (OuterVolumeSpecName: "config-volume") pod "f42ec511-1853-412d-930e-8908b4ca7619" (UID: "f42ec511-1853-412d-930e-8908b4ca7619"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:45:03 crc kubenswrapper[4767]: I0128 18:45:03.061128 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f42ec511-1853-412d-930e-8908b4ca7619-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f42ec511-1853-412d-930e-8908b4ca7619" (UID: "f42ec511-1853-412d-930e-8908b4ca7619"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:45:03 crc kubenswrapper[4767]: I0128 18:45:03.061848 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f42ec511-1853-412d-930e-8908b4ca7619-kube-api-access-fcd5n" (OuterVolumeSpecName: "kube-api-access-fcd5n") pod "f42ec511-1853-412d-930e-8908b4ca7619" (UID: "f42ec511-1853-412d-930e-8908b4ca7619"). InnerVolumeSpecName "kube-api-access-fcd5n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:45:03 crc kubenswrapper[4767]: I0128 18:45:03.158430 4767 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f42ec511-1853-412d-930e-8908b4ca7619-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 18:45:03 crc kubenswrapper[4767]: I0128 18:45:03.158497 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcd5n\" (UniqueName: \"kubernetes.io/projected/f42ec511-1853-412d-930e-8908b4ca7619-kube-api-access-fcd5n\") on node \"crc\" DevicePath \"\"" Jan 28 18:45:03 crc kubenswrapper[4767]: I0128 18:45:03.158512 4767 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f42ec511-1853-412d-930e-8908b4ca7619-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 18:45:03 crc kubenswrapper[4767]: I0128 18:45:03.649993 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" event={"ID":"f42ec511-1853-412d-930e-8908b4ca7619","Type":"ContainerDied","Data":"239177c2361b04921d0a8c47d3333b44f202385551fe3f3fb1c86c8e5900ccce"} Jan 28 18:45:03 crc kubenswrapper[4767]: I0128 18:45:03.650044 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="239177c2361b04921d0a8c47d3333b44f202385551fe3f3fb1c86c8e5900ccce" Jan 28 18:45:03 crc kubenswrapper[4767]: I0128 18:45:03.650160 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl" Jan 28 18:45:14 crc kubenswrapper[4767]: I0128 18:45:14.803618 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-6584c89557-hp6s4" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.657680 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5xhtx"] Jan 28 18:45:16 crc kubenswrapper[4767]: E0128 18:45:16.658279 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f42ec511-1853-412d-930e-8908b4ca7619" containerName="collect-profiles" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.658292 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f42ec511-1853-412d-930e-8908b4ca7619" containerName="collect-profiles" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.658403 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f42ec511-1853-412d-930e-8908b4ca7619" containerName="collect-profiles" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.659235 4767 util.go:30] "No sandbox for pod can be found. 
Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.681578 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xhtx"] Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.773111 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxvrz\" (UniqueName: \"kubernetes.io/projected/d5228e91-9b73-483e-b7c2-cff89b833647-kube-api-access-gxvrz\") pod \"redhat-marketplace-5xhtx\" (UID: \"d5228e91-9b73-483e-b7c2-cff89b833647\") " pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.773167 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5228e91-9b73-483e-b7c2-cff89b833647-utilities\") pod \"redhat-marketplace-5xhtx\" (UID: \"d5228e91-9b73-483e-b7c2-cff89b833647\") " pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.773379 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5228e91-9b73-483e-b7c2-cff89b833647-catalog-content\") pod \"redhat-marketplace-5xhtx\" (UID: \"d5228e91-9b73-483e-b7c2-cff89b833647\") " pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.875009 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxvrz\" (UniqueName: \"kubernetes.io/projected/d5228e91-9b73-483e-b7c2-cff89b833647-kube-api-access-gxvrz\") pod \"redhat-marketplace-5xhtx\" (UID: \"d5228e91-9b73-483e-b7c2-cff89b833647\") " pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.875078 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5228e91-9b73-483e-b7c2-cff89b833647-utilities\") pod \"redhat-marketplace-5xhtx\" (UID: \"d5228e91-9b73-483e-b7c2-cff89b833647\") " pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.875228 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5228e91-9b73-483e-b7c2-cff89b833647-catalog-content\") pod \"redhat-marketplace-5xhtx\" (UID: \"d5228e91-9b73-483e-b7c2-cff89b833647\") " pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.875691 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5228e91-9b73-483e-b7c2-cff89b833647-catalog-content\") pod \"redhat-marketplace-5xhtx\" (UID: \"d5228e91-9b73-483e-b7c2-cff89b833647\") " pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.875897 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5228e91-9b73-483e-b7c2-cff89b833647-utilities\") pod \"redhat-marketplace-5xhtx\" (UID: \"d5228e91-9b73-483e-b7c2-cff89b833647\") " pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.903124 4767 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-gxvrz\" (UniqueName: \"kubernetes.io/projected/d5228e91-9b73-483e-b7c2-cff89b833647-kube-api-access-gxvrz\") pod \"redhat-marketplace-5xhtx\" (UID: \"d5228e91-9b73-483e-b7c2-cff89b833647\") " pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:16 crc kubenswrapper[4767]: I0128 18:45:16.977080 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:17 crc kubenswrapper[4767]: I0128 18:45:17.405398 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xhtx"] Jan 28 18:45:17 crc kubenswrapper[4767]: I0128 18:45:17.735601 4767 generic.go:334] "Generic (PLEG): container finished" podID="d5228e91-9b73-483e-b7c2-cff89b833647" containerID="95b92cc59d01fc8524c8857cb99511689452534afbfc783235ba66c4a2fecea7" exitCode=0 Jan 28 18:45:17 crc kubenswrapper[4767]: I0128 18:45:17.735675 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xhtx" event={"ID":"d5228e91-9b73-483e-b7c2-cff89b833647","Type":"ContainerDied","Data":"95b92cc59d01fc8524c8857cb99511689452534afbfc783235ba66c4a2fecea7"} Jan 28 18:45:17 crc kubenswrapper[4767]: I0128 18:45:17.735795 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xhtx" event={"ID":"d5228e91-9b73-483e-b7c2-cff89b833647","Type":"ContainerStarted","Data":"3546631f4bedcde9b695f534ad3c785e153acbc57fff1060af11bcd776585e12"} Jan 28 18:45:20 crc kubenswrapper[4767]: I0128 18:45:20.756466 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xhtx" event={"ID":"d5228e91-9b73-483e-b7c2-cff89b833647","Type":"ContainerStarted","Data":"cf62748f291d0dccd0ce79df801d60ae945c950702e707353556201396d622bf"} Jan 28 18:45:21 crc kubenswrapper[4767]: I0128 18:45:21.770462 4767 generic.go:334] "Generic (PLEG): container finished" podID="d5228e91-9b73-483e-b7c2-cff89b833647" containerID="cf62748f291d0dccd0ce79df801d60ae945c950702e707353556201396d622bf" exitCode=0 Jan 28 18:45:21 crc kubenswrapper[4767]: I0128 18:45:21.770601 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xhtx" event={"ID":"d5228e91-9b73-483e-b7c2-cff89b833647","Type":"ContainerDied","Data":"cf62748f291d0dccd0ce79df801d60ae945c950702e707353556201396d622bf"} Jan 28 18:45:22 crc kubenswrapper[4767]: I0128 18:45:22.779739 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xhtx" event={"ID":"d5228e91-9b73-483e-b7c2-cff89b833647","Type":"ContainerStarted","Data":"930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9"} Jan 28 18:45:22 crc kubenswrapper[4767]: I0128 18:45:22.805198 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5xhtx" podStartSLOduration=2.366022522 podStartE2EDuration="6.805170226s" podCreationTimestamp="2026-01-28 18:45:16 +0000 UTC" firstStartedPulling="2026-01-28 18:45:17.7376854 +0000 UTC m=+923.701868274" lastFinishedPulling="2026-01-28 18:45:22.176833104 +0000 UTC m=+928.141015978" observedRunningTime="2026-01-28 18:45:22.801005185 +0000 UTC m=+928.765188069" watchObservedRunningTime="2026-01-28 18:45:22.805170226 +0000 UTC m=+928.769353100" Jan 28 18:45:26 crc kubenswrapper[4767]: I0128 18:45:26.977455 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:26 crc kubenswrapper[4767]: I0128 18:45:26.977909 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:27 crc kubenswrapper[4767]: I0128 18:45:27.025901 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:27 crc kubenswrapper[4767]: I0128 18:45:27.853137 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:29 crc kubenswrapper[4767]: I0128 18:45:29.450433 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xhtx"] Jan 28 18:45:29 crc kubenswrapper[4767]: I0128 18:45:29.826993 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5xhtx" podUID="d5228e91-9b73-483e-b7c2-cff89b833647" containerName="registry-server" containerID="cri-o://930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9" gracePeriod=2 Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.184196 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.264714 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5228e91-9b73-483e-b7c2-cff89b833647-catalog-content\") pod \"d5228e91-9b73-483e-b7c2-cff89b833647\" (UID: \"d5228e91-9b73-483e-b7c2-cff89b833647\") " Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.264840 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5228e91-9b73-483e-b7c2-cff89b833647-utilities\") pod \"d5228e91-9b73-483e-b7c2-cff89b833647\" (UID: \"d5228e91-9b73-483e-b7c2-cff89b833647\") " Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.264881 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxvrz\" (UniqueName: \"kubernetes.io/projected/d5228e91-9b73-483e-b7c2-cff89b833647-kube-api-access-gxvrz\") pod \"d5228e91-9b73-483e-b7c2-cff89b833647\" (UID: \"d5228e91-9b73-483e-b7c2-cff89b833647\") " Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.265902 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5228e91-9b73-483e-b7c2-cff89b833647-utilities" (OuterVolumeSpecName: "utilities") pod "d5228e91-9b73-483e-b7c2-cff89b833647" (UID: "d5228e91-9b73-483e-b7c2-cff89b833647"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.269762 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d5228e91-9b73-483e-b7c2-cff89b833647-kube-api-access-gxvrz" (OuterVolumeSpecName: "kube-api-access-gxvrz") pod "d5228e91-9b73-483e-b7c2-cff89b833647" (UID: "d5228e91-9b73-483e-b7c2-cff89b833647"). InnerVolumeSpecName "kube-api-access-gxvrz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.307695 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d5228e91-9b73-483e-b7c2-cff89b833647-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d5228e91-9b73-483e-b7c2-cff89b833647" (UID: "d5228e91-9b73-483e-b7c2-cff89b833647"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.366690 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d5228e91-9b73-483e-b7c2-cff89b833647-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.367027 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxvrz\" (UniqueName: \"kubernetes.io/projected/d5228e91-9b73-483e-b7c2-cff89b833647-kube-api-access-gxvrz\") on node \"crc\" DevicePath \"\"" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.367042 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d5228e91-9b73-483e-b7c2-cff89b833647-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.833680 4767 generic.go:334] "Generic (PLEG): container finished" podID="d5228e91-9b73-483e-b7c2-cff89b833647" containerID="930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9" exitCode=0 Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.833742 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5xhtx" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.833737 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xhtx" event={"ID":"d5228e91-9b73-483e-b7c2-cff89b833647","Type":"ContainerDied","Data":"930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9"} Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.833787 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5xhtx" event={"ID":"d5228e91-9b73-483e-b7c2-cff89b833647","Type":"ContainerDied","Data":"3546631f4bedcde9b695f534ad3c785e153acbc57fff1060af11bcd776585e12"} Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.833805 4767 scope.go:117] "RemoveContainer" containerID="930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.854734 4767 scope.go:117] "RemoveContainer" containerID="cf62748f291d0dccd0ce79df801d60ae945c950702e707353556201396d622bf" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.858685 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xhtx"] Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.867005 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5xhtx"] Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.868918 4767 scope.go:117] "RemoveContainer" containerID="95b92cc59d01fc8524c8857cb99511689452534afbfc783235ba66c4a2fecea7" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.888002 4767 scope.go:117] "RemoveContainer" containerID="930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9" Jan 28 18:45:30 crc kubenswrapper[4767]: E0128 18:45:30.888414 4767 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9\": container with ID starting with 930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9 not found: ID does not exist" containerID="930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.888447 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9"} err="failed to get container status \"930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9\": rpc error: code = NotFound desc = could not find container \"930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9\": container with ID starting with 930153d281b8a2e246778a561fcb7e1f5982522d150f6da7d7d9ee930f87a9e9 not found: ID does not exist" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.888469 4767 scope.go:117] "RemoveContainer" containerID="cf62748f291d0dccd0ce79df801d60ae945c950702e707353556201396d622bf" Jan 28 18:45:30 crc kubenswrapper[4767]: E0128 18:45:30.888828 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf62748f291d0dccd0ce79df801d60ae945c950702e707353556201396d622bf\": container with ID starting with cf62748f291d0dccd0ce79df801d60ae945c950702e707353556201396d622bf not found: ID does not exist" containerID="cf62748f291d0dccd0ce79df801d60ae945c950702e707353556201396d622bf" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.888845 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf62748f291d0dccd0ce79df801d60ae945c950702e707353556201396d622bf"} err="failed to get container status \"cf62748f291d0dccd0ce79df801d60ae945c950702e707353556201396d622bf\": rpc error: code = NotFound desc = could not find container \"cf62748f291d0dccd0ce79df801d60ae945c950702e707353556201396d622bf\": container with ID starting with cf62748f291d0dccd0ce79df801d60ae945c950702e707353556201396d622bf not found: ID does not exist" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.888859 4767 scope.go:117] "RemoveContainer" containerID="95b92cc59d01fc8524c8857cb99511689452534afbfc783235ba66c4a2fecea7" Jan 28 18:45:30 crc kubenswrapper[4767]: E0128 18:45:30.889292 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95b92cc59d01fc8524c8857cb99511689452534afbfc783235ba66c4a2fecea7\": container with ID starting with 95b92cc59d01fc8524c8857cb99511689452534afbfc783235ba66c4a2fecea7 not found: ID does not exist" containerID="95b92cc59d01fc8524c8857cb99511689452534afbfc783235ba66c4a2fecea7" Jan 28 18:45:30 crc kubenswrapper[4767]: I0128 18:45:30.889317 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95b92cc59d01fc8524c8857cb99511689452534afbfc783235ba66c4a2fecea7"} err="failed to get container status \"95b92cc59d01fc8524c8857cb99511689452534afbfc783235ba66c4a2fecea7\": rpc error: code = NotFound desc = could not find container \"95b92cc59d01fc8524c8857cb99511689452534afbfc783235ba66c4a2fecea7\": container with ID starting with 95b92cc59d01fc8524c8857cb99511689452534afbfc783235ba66c4a2fecea7 not found: ID does not exist" Jan 28 18:45:32 crc kubenswrapper[4767]: I0128 18:45:32.803222 4767 kubelet_volumes.go:163] "Cleaned 
Jan 28 18:45:34 crc kubenswrapper[4767]: I0128 18:45:34.630554 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6f4744495b-dgmjt" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.293706 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-ngm5b"] Jan 28 18:45:35 crc kubenswrapper[4767]: E0128 18:45:35.294010 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5228e91-9b73-483e-b7c2-cff89b833647" containerName="registry-server" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.294027 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5228e91-9b73-483e-b7c2-cff89b833647" containerName="registry-server" Jan 28 18:45:35 crc kubenswrapper[4767]: E0128 18:45:35.294042 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5228e91-9b73-483e-b7c2-cff89b833647" containerName="extract-utilities" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.294051 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5228e91-9b73-483e-b7c2-cff89b833647" containerName="extract-utilities" Jan 28 18:45:35 crc kubenswrapper[4767]: E0128 18:45:35.294060 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d5228e91-9b73-483e-b7c2-cff89b833647" containerName="extract-content" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.294068 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="d5228e91-9b73-483e-b7c2-cff89b833647" containerName="extract-content" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.294222 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="d5228e91-9b73-483e-b7c2-cff89b833647" containerName="registry-server" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.296624 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.299314 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-hfqtt" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.301053 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.301456 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.302503 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6"] Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.303432 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.305637 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.317859 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6"] Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.330196 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr7lr\" (UniqueName: \"kubernetes.io/projected/e21010b1-73fb-4d7f-981a-a64a10495e7d-kube-api-access-nr7lr\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.330301 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e21010b1-73fb-4d7f-981a-a64a10495e7d-frr-startup\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.330341 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e21010b1-73fb-4d7f-981a-a64a10495e7d-metrics\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.330370 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e21010b1-73fb-4d7f-981a-a64a10495e7d-frr-conf\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.330428 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e21010b1-73fb-4d7f-981a-a64a10495e7d-frr-sockets\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.330463 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfqsv\" (UniqueName: \"kubernetes.io/projected/db113ec8-a92c-4ddb-abc6-d49a3fb842f3-kube-api-access-wfqsv\") pod \"frr-k8s-webhook-server-7df86c4f6c-584q6\" (UID: \"db113ec8-a92c-4ddb-abc6-d49a3fb842f3\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.330502 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e21010b1-73fb-4d7f-981a-a64a10495e7d-reloader\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.330525 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db113ec8-a92c-4ddb-abc6-d49a3fb842f3-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-584q6\" (UID: \"db113ec8-a92c-4ddb-abc6-d49a3fb842f3\") " 
pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.330554 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e21010b1-73fb-4d7f-981a-a64a10495e7d-metrics-certs\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.422073 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-lp8ks"] Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.423019 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-lp8ks" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.432567 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-dl225" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.433335 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr7lr\" (UniqueName: \"kubernetes.io/projected/e21010b1-73fb-4d7f-981a-a64a10495e7d-kube-api-access-nr7lr\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.433382 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e21010b1-73fb-4d7f-981a-a64a10495e7d-frr-startup\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.433402 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e21010b1-73fb-4d7f-981a-a64a10495e7d-metrics\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.433419 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e21010b1-73fb-4d7f-981a-a64a10495e7d-frr-conf\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.433450 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e21010b1-73fb-4d7f-981a-a64a10495e7d-frr-sockets\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.433472 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfqsv\" (UniqueName: \"kubernetes.io/projected/db113ec8-a92c-4ddb-abc6-d49a3fb842f3-kube-api-access-wfqsv\") pod \"frr-k8s-webhook-server-7df86c4f6c-584q6\" (UID: \"db113ec8-a92c-4ddb-abc6-d49a3fb842f3\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.433496 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e21010b1-73fb-4d7f-981a-a64a10495e7d-reloader\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc 
kubenswrapper[4767]: I0128 18:45:35.433510 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db113ec8-a92c-4ddb-abc6-d49a3fb842f3-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-584q6\" (UID: \"db113ec8-a92c-4ddb-abc6-d49a3fb842f3\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.433528 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e21010b1-73fb-4d7f-981a-a64a10495e7d-metrics-certs\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.434739 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/e21010b1-73fb-4d7f-981a-a64a10495e7d-frr-conf\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.435017 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/e21010b1-73fb-4d7f-981a-a64a10495e7d-metrics\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.435412 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/e21010b1-73fb-4d7f-981a-a64a10495e7d-frr-startup\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.435624 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/e21010b1-73fb-4d7f-981a-a64a10495e7d-frr-sockets\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: E0128 18:45:35.435775 4767 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Jan 28 18:45:35 crc kubenswrapper[4767]: E0128 18:45:35.435822 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/db113ec8-a92c-4ddb-abc6-d49a3fb842f3-cert podName:db113ec8-a92c-4ddb-abc6-d49a3fb842f3 nodeName:}" failed. No retries permitted until 2026-01-28 18:45:35.935806777 +0000 UTC m=+941.899989651 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/db113ec8-a92c-4ddb-abc6-d49a3fb842f3-cert") pod "frr-k8s-webhook-server-7df86c4f6c-584q6" (UID: "db113ec8-a92c-4ddb-abc6-d49a3fb842f3") : secret "frr-k8s-webhook-server-cert" not found Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.435846 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/e21010b1-73fb-4d7f-981a-a64a10495e7d-reloader\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.446186 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/e21010b1-73fb-4d7f-981a-a64a10495e7d-metrics-certs\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.446565 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.446755 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.451465 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.475956 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-lzbd2"] Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.477137 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6968d8fdc4-lzbd2" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.480724 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.514284 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfqsv\" (UniqueName: \"kubernetes.io/projected/db113ec8-a92c-4ddb-abc6-d49a3fb842f3-kube-api-access-wfqsv\") pod \"frr-k8s-webhook-server-7df86c4f6c-584q6\" (UID: \"db113ec8-a92c-4ddb-abc6-d49a3fb842f3\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.515872 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr7lr\" (UniqueName: \"kubernetes.io/projected/e21010b1-73fb-4d7f-981a-a64a10495e7d-kube-api-access-nr7lr\") pod \"frr-k8s-ngm5b\" (UID: \"e21010b1-73fb-4d7f-981a-a64a10495e7d\") " pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.529455 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-lzbd2"] Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.541433 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/92854eca-77c5-4859-bfdb-21b7b6c96c98-metallb-excludel2\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.541486 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5qlw\" (UniqueName: 
\"kubernetes.io/projected/b2e291f1-6f8b-46e8-bc66-7bc0896aef3c-kube-api-access-v5qlw\") pod \"controller-6968d8fdc4-lzbd2\" (UID: \"b2e291f1-6f8b-46e8-bc66-7bc0896aef3c\") " pod="metallb-system/controller-6968d8fdc4-lzbd2" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.541527 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/92854eca-77c5-4859-bfdb-21b7b6c96c98-metrics-certs\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.541560 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2e291f1-6f8b-46e8-bc66-7bc0896aef3c-cert\") pod \"controller-6968d8fdc4-lzbd2\" (UID: \"b2e291f1-6f8b-46e8-bc66-7bc0896aef3c\") " pod="metallb-system/controller-6968d8fdc4-lzbd2" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.541586 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4226n\" (UniqueName: \"kubernetes.io/projected/92854eca-77c5-4859-bfdb-21b7b6c96c98-kube-api-access-4226n\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.541637 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/92854eca-77c5-4859-bfdb-21b7b6c96c98-memberlist\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.541673 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b2e291f1-6f8b-46e8-bc66-7bc0896aef3c-metrics-certs\") pod \"controller-6968d8fdc4-lzbd2\" (UID: \"b2e291f1-6f8b-46e8-bc66-7bc0896aef3c\") " pod="metallb-system/controller-6968d8fdc4-lzbd2" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.618086 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.643629 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b2e291f1-6f8b-46e8-bc66-7bc0896aef3c-metrics-certs\") pod \"controller-6968d8fdc4-lzbd2\" (UID: \"b2e291f1-6f8b-46e8-bc66-7bc0896aef3c\") " pod="metallb-system/controller-6968d8fdc4-lzbd2" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.643697 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/92854eca-77c5-4859-bfdb-21b7b6c96c98-metallb-excludel2\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.643730 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5qlw\" (UniqueName: \"kubernetes.io/projected/b2e291f1-6f8b-46e8-bc66-7bc0896aef3c-kube-api-access-v5qlw\") pod \"controller-6968d8fdc4-lzbd2\" (UID: \"b2e291f1-6f8b-46e8-bc66-7bc0896aef3c\") " pod="metallb-system/controller-6968d8fdc4-lzbd2" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.643771 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/92854eca-77c5-4859-bfdb-21b7b6c96c98-metrics-certs\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.643804 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2e291f1-6f8b-46e8-bc66-7bc0896aef3c-cert\") pod \"controller-6968d8fdc4-lzbd2\" (UID: \"b2e291f1-6f8b-46e8-bc66-7bc0896aef3c\") " pod="metallb-system/controller-6968d8fdc4-lzbd2" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.643828 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4226n\" (UniqueName: \"kubernetes.io/projected/92854eca-77c5-4859-bfdb-21b7b6c96c98-kube-api-access-4226n\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.643872 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/92854eca-77c5-4859-bfdb-21b7b6c96c98-memberlist\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:35 crc kubenswrapper[4767]: E0128 18:45:35.643969 4767 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 28 18:45:35 crc kubenswrapper[4767]: E0128 18:45:35.644039 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/92854eca-77c5-4859-bfdb-21b7b6c96c98-memberlist podName:92854eca-77c5-4859-bfdb-21b7b6c96c98 nodeName:}" failed. No retries permitted until 2026-01-28 18:45:36.144021672 +0000 UTC m=+942.108204556 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/92854eca-77c5-4859-bfdb-21b7b6c96c98-memberlist") pod "speaker-lp8ks" (UID: "92854eca-77c5-4859-bfdb-21b7b6c96c98") : secret "metallb-memberlist" not found Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.645464 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/92854eca-77c5-4859-bfdb-21b7b6c96c98-metallb-excludel2\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.653013 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b2e291f1-6f8b-46e8-bc66-7bc0896aef3c-metrics-certs\") pod \"controller-6968d8fdc4-lzbd2\" (UID: \"b2e291f1-6f8b-46e8-bc66-7bc0896aef3c\") " pod="metallb-system/controller-6968d8fdc4-lzbd2" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.654896 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/92854eca-77c5-4859-bfdb-21b7b6c96c98-metrics-certs\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.659881 4767 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.663926 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4226n\" (UniqueName: \"kubernetes.io/projected/92854eca-77c5-4859-bfdb-21b7b6c96c98-kube-api-access-4226n\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.672083 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5qlw\" (UniqueName: \"kubernetes.io/projected/b2e291f1-6f8b-46e8-bc66-7bc0896aef3c-kube-api-access-v5qlw\") pod \"controller-6968d8fdc4-lzbd2\" (UID: \"b2e291f1-6f8b-46e8-bc66-7bc0896aef3c\") " pod="metallb-system/controller-6968d8fdc4-lzbd2" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.673684 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b2e291f1-6f8b-46e8-bc66-7bc0896aef3c-cert\") pod \"controller-6968d8fdc4-lzbd2\" (UID: \"b2e291f1-6f8b-46e8-bc66-7bc0896aef3c\") " pod="metallb-system/controller-6968d8fdc4-lzbd2" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.795009 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-lzbd2" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.877486 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ngm5b" event={"ID":"e21010b1-73fb-4d7f-981a-a64a10495e7d","Type":"ContainerStarted","Data":"37929a2ddf4fecdf1d84ba7b92f767077f59300ac920e6cfd1001ddbcdb8fbc3"} Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.947268 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db113ec8-a92c-4ddb-abc6-d49a3fb842f3-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-584q6\" (UID: \"db113ec8-a92c-4ddb-abc6-d49a3fb842f3\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6" Jan 28 18:45:35 crc kubenswrapper[4767]: I0128 18:45:35.956450 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/db113ec8-a92c-4ddb-abc6-d49a3fb842f3-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-584q6\" (UID: \"db113ec8-a92c-4ddb-abc6-d49a3fb842f3\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6" Jan 28 18:45:36 crc kubenswrapper[4767]: I0128 18:45:36.036196 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-lzbd2"] Jan 28 18:45:36 crc kubenswrapper[4767]: W0128 18:45:36.042503 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb2e291f1_6f8b_46e8_bc66_7bc0896aef3c.slice/crio-04c0edbe22c2f5019a57d8b99482952e38723b17c56f73f80c3be03d67c55046 WatchSource:0}: Error finding container 04c0edbe22c2f5019a57d8b99482952e38723b17c56f73f80c3be03d67c55046: Status 404 returned error can't find the container with id 04c0edbe22c2f5019a57d8b99482952e38723b17c56f73f80c3be03d67c55046 Jan 28 18:45:36 crc kubenswrapper[4767]: I0128 18:45:36.149834 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/92854eca-77c5-4859-bfdb-21b7b6c96c98-memberlist\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:36 crc kubenswrapper[4767]: E0128 18:45:36.150190 4767 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 28 18:45:36 crc kubenswrapper[4767]: E0128 18:45:36.150533 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/92854eca-77c5-4859-bfdb-21b7b6c96c98-memberlist podName:92854eca-77c5-4859-bfdb-21b7b6c96c98 nodeName:}" failed. No retries permitted until 2026-01-28 18:45:37.150508455 +0000 UTC m=+943.114691329 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/92854eca-77c5-4859-bfdb-21b7b6c96c98-memberlist") pod "speaker-lp8ks" (UID: "92854eca-77c5-4859-bfdb-21b7b6c96c98") : secret "metallb-memberlist" not found Jan 28 18:45:36 crc kubenswrapper[4767]: I0128 18:45:36.225491 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6"
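
The memberlist mount fails twice while the metallb operator has not yet created the secret: the first nestedpendingoperations entry (18:45:35.644) schedules a retry after 500ms, the one above (18:45:36.150) after 1s, so the per-operation delay doubles on consecutive failures until the secret appears and the SetUp succeeds at 18:45:37.207. A sketch of that doubling-delay retry loop; the 500ms starting value is the one visible in this log, while maxDelay and the attempt limit are assumptions, not values taken from the kubelet:

    // mount_backoff.go - illustrative sketch of the doubling retry delay shown
    // above (durationBeforeRetry 500ms, then 1s).
    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    var errSecretMissing = errors.New(`secret "metallb-memberlist" not found`)

    func mountWithBackoff(mount func() error) error {
        delay := 500 * time.Millisecond // first delay observed in the log
        const maxDelay = 2 * time.Minute // assumed upper bound
        for attempt := 1; attempt <= 10; attempt++ {
            err := mount()
            if err == nil {
                return nil
            }
            fmt.Printf("attempt %d failed: %v; no retries permitted for %v\n", attempt, err, delay)
            time.Sleep(delay)
            delay *= 2 // the log shows 500ms doubling to 1s
            if delay > maxDelay {
                delay = maxDelay
            }
        }
        return errSecretMissing
    }

    func main() {
        tries := 0
        _ = mountWithBackoff(func() error {
            tries++
            if tries < 3 {
                return errSecretMissing // the operator creates the secret moments later
            }
            return nil
        })
    }
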
Jan 28 18:45:36 crc kubenswrapper[4767]: I0128 18:45:36.528965 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6"] Jan 28 18:45:36 crc kubenswrapper[4767]: W0128 18:45:36.532412 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb113ec8_a92c_4ddb_abc6_d49a3fb842f3.slice/crio-84ce9ee6912d57aad71beb75c3c77114a6223c18198b575e6080846e5f261f67 WatchSource:0}: Error finding container 84ce9ee6912d57aad71beb75c3c77114a6223c18198b575e6080846e5f261f67: Status 404 returned error can't find the container with id 84ce9ee6912d57aad71beb75c3c77114a6223c18198b575e6080846e5f261f67 Jan 28 18:45:36 crc kubenswrapper[4767]: I0128 18:45:36.883254 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6" event={"ID":"db113ec8-a92c-4ddb-abc6-d49a3fb842f3","Type":"ContainerStarted","Data":"84ce9ee6912d57aad71beb75c3c77114a6223c18198b575e6080846e5f261f67"} Jan 28 18:45:36 crc kubenswrapper[4767]: I0128 18:45:36.885193 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-lzbd2" event={"ID":"b2e291f1-6f8b-46e8-bc66-7bc0896aef3c","Type":"ContainerStarted","Data":"0bb6b6b01e861edccd1594b7a44168be9a86c3bc8cf6c375250c97da3a93d696"} Jan 28 18:45:36 crc kubenswrapper[4767]: I0128 18:45:36.885244 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-lzbd2" event={"ID":"b2e291f1-6f8b-46e8-bc66-7bc0896aef3c","Type":"ContainerStarted","Data":"a7963f80f7e0f3a2fefe836b7f28355a47813465f6e6307c5495c2d3a878be96"} Jan 28 18:45:36 crc kubenswrapper[4767]: I0128 18:45:36.885258 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-lzbd2" event={"ID":"b2e291f1-6f8b-46e8-bc66-7bc0896aef3c","Type":"ContainerStarted","Data":"04c0edbe22c2f5019a57d8b99482952e38723b17c56f73f80c3be03d67c55046"} Jan 28 18:45:36 crc kubenswrapper[4767]: I0128 18:45:36.885427 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-lzbd2" Jan 28 18:45:36 crc kubenswrapper[4767]: I0128 18:45:36.908831 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-lzbd2" podStartSLOduration=1.908805898 podStartE2EDuration="1.908805898s" podCreationTimestamp="2026-01-28 18:45:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:45:36.902766059 +0000 UTC m=+942.866948943" watchObservedRunningTime="2026-01-28 18:45:36.908805898 +0000 UTC m=+942.872988772" Jan 28 18:45:37 crc kubenswrapper[4767]: I0128 18:45:37.202168 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/92854eca-77c5-4859-bfdb-21b7b6c96c98-memberlist\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 18:45:37 crc kubenswrapper[4767]: I0128 18:45:37.207327 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/92854eca-77c5-4859-bfdb-21b7b6c96c98-memberlist\") pod \"speaker-lp8ks\" (UID: \"92854eca-77c5-4859-bfdb-21b7b6c96c98\") " pod="metallb-system/speaker-lp8ks" Jan 28 
18:45:37 crc kubenswrapper[4767]: I0128 18:45:37.273007 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-lp8ks" Jan 28 18:45:37 crc kubenswrapper[4767]: W0128 18:45:37.291562 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod92854eca_77c5_4859_bfdb_21b7b6c96c98.slice/crio-f3109f80b5e8f4e974503d3185bd24f2f5fc5c716bf782c215db61f9f87fed3e WatchSource:0}: Error finding container f3109f80b5e8f4e974503d3185bd24f2f5fc5c716bf782c215db61f9f87fed3e: Status 404 returned error can't find the container with id f3109f80b5e8f4e974503d3185bd24f2f5fc5c716bf782c215db61f9f87fed3e Jan 28 18:45:37 crc kubenswrapper[4767]: I0128 18:45:37.869432 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-czlpn"] Jan 28 18:45:37 crc kubenswrapper[4767]: I0128 18:45:37.871145 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:37 crc kubenswrapper[4767]: I0128 18:45:37.919602 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-czlpn"] Jan 28 18:45:37 crc kubenswrapper[4767]: I0128 18:45:37.984889 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-lp8ks" event={"ID":"92854eca-77c5-4859-bfdb-21b7b6c96c98","Type":"ContainerStarted","Data":"a6de545be40f30c3a78f552415124fd8e8fac48beebcf829e9ef41ec1e32c559"} Jan 28 18:45:37 crc kubenswrapper[4767]: I0128 18:45:37.984932 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-lp8ks" event={"ID":"92854eca-77c5-4859-bfdb-21b7b6c96c98","Type":"ContainerStarted","Data":"f3109f80b5e8f4e974503d3185bd24f2f5fc5c716bf782c215db61f9f87fed3e"} Jan 28 18:45:38 crc kubenswrapper[4767]: I0128 18:45:38.014300 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41287a5e-8840-40e2-914e-a0b7bb92ecee-catalog-content\") pod \"certified-operators-czlpn\" (UID: \"41287a5e-8840-40e2-914e-a0b7bb92ecee\") " pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:38 crc kubenswrapper[4767]: I0128 18:45:38.014376 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41287a5e-8840-40e2-914e-a0b7bb92ecee-utilities\") pod \"certified-operators-czlpn\" (UID: \"41287a5e-8840-40e2-914e-a0b7bb92ecee\") " pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:38 crc kubenswrapper[4767]: I0128 18:45:38.014407 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7kz5\" (UniqueName: \"kubernetes.io/projected/41287a5e-8840-40e2-914e-a0b7bb92ecee-kube-api-access-m7kz5\") pod \"certified-operators-czlpn\" (UID: \"41287a5e-8840-40e2-914e-a0b7bb92ecee\") " pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:38 crc kubenswrapper[4767]: I0128 18:45:38.115121 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7kz5\" (UniqueName: \"kubernetes.io/projected/41287a5e-8840-40e2-914e-a0b7bb92ecee-kube-api-access-m7kz5\") pod \"certified-operators-czlpn\" (UID: \"41287a5e-8840-40e2-914e-a0b7bb92ecee\") " pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:38 crc kubenswrapper[4767]: 
I0128 18:45:38.115246 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41287a5e-8840-40e2-914e-a0b7bb92ecee-catalog-content\") pod \"certified-operators-czlpn\" (UID: \"41287a5e-8840-40e2-914e-a0b7bb92ecee\") " pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:38 crc kubenswrapper[4767]: I0128 18:45:38.115300 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41287a5e-8840-40e2-914e-a0b7bb92ecee-utilities\") pod \"certified-operators-czlpn\" (UID: \"41287a5e-8840-40e2-914e-a0b7bb92ecee\") " pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:38 crc kubenswrapper[4767]: I0128 18:45:38.115761 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41287a5e-8840-40e2-914e-a0b7bb92ecee-catalog-content\") pod \"certified-operators-czlpn\" (UID: \"41287a5e-8840-40e2-914e-a0b7bb92ecee\") " pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:38 crc kubenswrapper[4767]: I0128 18:45:38.115838 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41287a5e-8840-40e2-914e-a0b7bb92ecee-utilities\") pod \"certified-operators-czlpn\" (UID: \"41287a5e-8840-40e2-914e-a0b7bb92ecee\") " pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:38 crc kubenswrapper[4767]: I0128 18:45:38.157192 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7kz5\" (UniqueName: \"kubernetes.io/projected/41287a5e-8840-40e2-914e-a0b7bb92ecee-kube-api-access-m7kz5\") pod \"certified-operators-czlpn\" (UID: \"41287a5e-8840-40e2-914e-a0b7bb92ecee\") " pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:38 crc kubenswrapper[4767]: I0128 18:45:38.253161 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:38 crc kubenswrapper[4767]: I0128 18:45:38.709571 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-czlpn"] Jan 28 18:45:39 crc kubenswrapper[4767]: I0128 18:45:39.000765 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-lp8ks" event={"ID":"92854eca-77c5-4859-bfdb-21b7b6c96c98","Type":"ContainerStarted","Data":"64d726f470c5d500f8bef1d0aefbc1947ac30976529af1d1f8a19f325d2a4e07"} Jan 28 18:45:39 crc kubenswrapper[4767]: I0128 18:45:39.001374 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-lp8ks" Jan 28 18:45:39 crc kubenswrapper[4767]: I0128 18:45:39.012794 4767 generic.go:334] "Generic (PLEG): container finished" podID="41287a5e-8840-40e2-914e-a0b7bb92ecee" containerID="afa3c5e633084b99c5fda6bd5bff08a51826c49652f33025914c3e0b9c7efdd5" exitCode=0 Jan 28 18:45:39 crc kubenswrapper[4767]: I0128 18:45:39.012853 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-czlpn" event={"ID":"41287a5e-8840-40e2-914e-a0b7bb92ecee","Type":"ContainerDied","Data":"afa3c5e633084b99c5fda6bd5bff08a51826c49652f33025914c3e0b9c7efdd5"} Jan 28 18:45:39 crc kubenswrapper[4767]: I0128 18:45:39.012902 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-czlpn" event={"ID":"41287a5e-8840-40e2-914e-a0b7bb92ecee","Type":"ContainerStarted","Data":"f4435662ce1ad9278eacf1ebdee1136ef07462556f646dabacb03b3e069ebe16"} Jan 28 18:45:39 crc kubenswrapper[4767]: I0128 18:45:39.025754 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-lp8ks" podStartSLOduration=4.025722648 podStartE2EDuration="4.025722648s" podCreationTimestamp="2026-01-28 18:45:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:45:39.02290069 +0000 UTC m=+944.987083594" watchObservedRunningTime="2026-01-28 18:45:39.025722648 +0000 UTC m=+944.989905522" Jan 28 18:45:40 crc kubenswrapper[4767]: I0128 18:45:40.023495 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-czlpn" event={"ID":"41287a5e-8840-40e2-914e-a0b7bb92ecee","Type":"ContainerStarted","Data":"33cf144942cb01836f0f42730eb353c6fbedda383ccc8f360ad95b83a193cc0e"} Jan 28 18:45:41 crc kubenswrapper[4767]: I0128 18:45:41.043571 4767 generic.go:334] "Generic (PLEG): container finished" podID="41287a5e-8840-40e2-914e-a0b7bb92ecee" containerID="33cf144942cb01836f0f42730eb353c6fbedda383ccc8f360ad95b83a193cc0e" exitCode=0 Jan 28 18:45:41 crc kubenswrapper[4767]: I0128 18:45:41.043672 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-czlpn" event={"ID":"41287a5e-8840-40e2-914e-a0b7bb92ecee","Type":"ContainerDied","Data":"33cf144942cb01836f0f42730eb353c6fbedda383ccc8f360ad95b83a193cc0e"} Jan 28 18:45:42 crc kubenswrapper[4767]: I0128 18:45:42.056748 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-czlpn" event={"ID":"41287a5e-8840-40e2-914e-a0b7bb92ecee","Type":"ContainerStarted","Data":"86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce"} Jan 28 18:45:42 crc kubenswrapper[4767]: I0128 18:45:42.080692 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-czlpn" podStartSLOduration=2.603440504 podStartE2EDuration="5.080668945s" podCreationTimestamp="2026-01-28 18:45:37 +0000 UTC" firstStartedPulling="2026-01-28 18:45:39.017979986 +0000 UTC m=+944.982162860" lastFinishedPulling="2026-01-28 18:45:41.495208437 +0000 UTC m=+947.459391301" observedRunningTime="2026-01-28 18:45:42.076466502 +0000 UTC m=+948.040649386" watchObservedRunningTime="2026-01-28 18:45:42.080668945 +0000 UTC m=+948.044851819"
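
The startup-latency entry for certified-operators-czlpn decomposes cleanly: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (18:45:42.080668945 minus 18:45:37 gives 5.080668945s), and podStartSLOduration subtracts the image-pull window measured on the monotonic clock carried in the m=+ suffixes (947.459391301 minus 944.982162860 gives 2.477228441s), leaving 2.603440504s. A small check with the figures copied from the entry above; the decomposition is inferred from these numbers, not quoted from kubelet source:

    // startup_latency.go - recomputes the certified-operators-czlpn figures.
    package main

    import (
        "fmt"
        "time"
    )

    func must(t time.Time, err error) time.Time {
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        layout := "2006-01-02 15:04:05.999999999"
        created := must(time.Parse(layout, "2026-01-28 18:45:37"))
        running := must(time.Parse(layout, "2026-01-28 18:45:42.080668945"))

        e2e := running.Sub(created)
        // The m=+ suffixes have nine decimals, so they convert exactly to nanoseconds.
        firstPull := time.Duration(944982162860) // m=+944.982162860
        lastPull := time.Duration(947459391301)  // m=+947.459391301
        slo := e2e - (lastPull - firstPull)

        fmt.Println("podStartE2EDuration:", e2e) // 5.080668945s, as logged
        fmt.Println("podStartSLOduration:", slo) // 2.603440504s, as logged
    }

The SLO duration therefore measures startup time with image-pull latency excluded, which is why pods started from cached images (speaker-lp8ks, controller-6968d8fdc4-lzbd2, with zero-value pull timestamps) report SLO equal to E2E.
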
pod="openshift-marketplace/certified-operators-czlpn" podStartSLOduration=2.603440504 podStartE2EDuration="5.080668945s" podCreationTimestamp="2026-01-28 18:45:37 +0000 UTC" firstStartedPulling="2026-01-28 18:45:39.017979986 +0000 UTC m=+944.982162860" lastFinishedPulling="2026-01-28 18:45:41.495208437 +0000 UTC m=+947.459391301" observedRunningTime="2026-01-28 18:45:42.076466502 +0000 UTC m=+948.040649386" watchObservedRunningTime="2026-01-28 18:45:42.080668945 +0000 UTC m=+948.044851819" Jan 28 18:45:45 crc kubenswrapper[4767]: I0128 18:45:45.455698 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:45:45 crc kubenswrapper[4767]: I0128 18:45:45.456330 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.154309 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kw288"] Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.193412 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kw288" Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.201683 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kw288"] Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.273644 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-catalog-content\") pod \"community-operators-kw288\" (UID: \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\") " pod="openshift-marketplace/community-operators-kw288" Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.274166 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds6mx\" (UniqueName: \"kubernetes.io/projected/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-kube-api-access-ds6mx\") pod \"community-operators-kw288\" (UID: \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\") " pod="openshift-marketplace/community-operators-kw288" Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.274201 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-utilities\") pod \"community-operators-kw288\" (UID: \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\") " pod="openshift-marketplace/community-operators-kw288" Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.375752 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds6mx\" (UniqueName: \"kubernetes.io/projected/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-kube-api-access-ds6mx\") pod \"community-operators-kw288\" (UID: \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\") " pod="openshift-marketplace/community-operators-kw288" Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.375840 4767 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-utilities\") pod \"community-operators-kw288\" (UID: \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\") " pod="openshift-marketplace/community-operators-kw288" Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.375954 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-catalog-content\") pod \"community-operators-kw288\" (UID: \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\") " pod="openshift-marketplace/community-operators-kw288" Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.376812 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-catalog-content\") pod \"community-operators-kw288\" (UID: \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\") " pod="openshift-marketplace/community-operators-kw288" Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.376891 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-utilities\") pod \"community-operators-kw288\" (UID: \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\") " pod="openshift-marketplace/community-operators-kw288" Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.413238 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds6mx\" (UniqueName: \"kubernetes.io/projected/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-kube-api-access-ds6mx\") pod \"community-operators-kw288\" (UID: \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\") " pod="openshift-marketplace/community-operators-kw288" Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.561938 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kw288" Jan 28 18:45:46 crc kubenswrapper[4767]: I0128 18:45:46.917536 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kw288"] Jan 28 18:45:46 crc kubenswrapper[4767]: W0128 18:45:46.925335 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcd131a5c_08d2_4025_a9bf_4aa957b02e2b.slice/crio-52f3e399cbadc3c9c0563a653387d7e662d6fb982a0db1978174ca21bfe745d1 WatchSource:0}: Error finding container 52f3e399cbadc3c9c0563a653387d7e662d6fb982a0db1978174ca21bfe745d1: Status 404 returned error can't find the container with id 52f3e399cbadc3c9c0563a653387d7e662d6fb982a0db1978174ca21bfe745d1 Jan 28 18:45:47 crc kubenswrapper[4767]: I0128 18:45:47.095538 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6" event={"ID":"db113ec8-a92c-4ddb-abc6-d49a3fb842f3","Type":"ContainerStarted","Data":"e4526034ede95d0c12390ed0488ab8cbcee301753ed8ace8cb8de7b33e2034c0"} Jan 28 18:45:47 crc kubenswrapper[4767]: I0128 18:45:47.095731 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6" Jan 28 18:45:47 crc kubenswrapper[4767]: I0128 18:45:47.099656 4767 generic.go:334] "Generic (PLEG): container finished" podID="e21010b1-73fb-4d7f-981a-a64a10495e7d" containerID="f80507bb300fa41d47980142ba1a29c856c1c2732fd0c5479a37302cee747a5f" exitCode=0 Jan 28 18:45:47 crc kubenswrapper[4767]: I0128 18:45:47.099760 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ngm5b" event={"ID":"e21010b1-73fb-4d7f-981a-a64a10495e7d","Type":"ContainerDied","Data":"f80507bb300fa41d47980142ba1a29c856c1c2732fd0c5479a37302cee747a5f"} Jan 28 18:45:47 crc kubenswrapper[4767]: I0128 18:45:47.102845 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kw288" event={"ID":"cd131a5c-08d2-4025-a9bf-4aa957b02e2b","Type":"ContainerStarted","Data":"f462fee3dec64af7228d8f5b684769facfaf22c5a57d41e2265be96e400e8f82"} Jan 28 18:45:47 crc kubenswrapper[4767]: I0128 18:45:47.102935 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kw288" event={"ID":"cd131a5c-08d2-4025-a9bf-4aa957b02e2b","Type":"ContainerStarted","Data":"52f3e399cbadc3c9c0563a653387d7e662d6fb982a0db1978174ca21bfe745d1"} Jan 28 18:45:47 crc kubenswrapper[4767]: I0128 18:45:47.129380 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6" podStartSLOduration=2.592891996 podStartE2EDuration="12.129350121s" podCreationTimestamp="2026-01-28 18:45:35 +0000 UTC" firstStartedPulling="2026-01-28 18:45:36.536847622 +0000 UTC m=+942.501030496" lastFinishedPulling="2026-01-28 18:45:46.073305747 +0000 UTC m=+952.037488621" observedRunningTime="2026-01-28 18:45:47.118707227 +0000 UTC m=+953.082890111" watchObservedRunningTime="2026-01-28 18:45:47.129350121 +0000 UTC m=+953.093532995" Jan 28 18:45:47 crc kubenswrapper[4767]: I0128 18:45:47.280573 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-lp8ks" Jan 28 18:45:48 crc kubenswrapper[4767]: I0128 18:45:48.115468 4767 generic.go:334] "Generic (PLEG): container finished" podID="e21010b1-73fb-4d7f-981a-a64a10495e7d" 
containerID="54117e3e1f5a30b8fc751f572033940f7dac428dbde951d3118ab468c500d032" exitCode=0 Jan 28 18:45:48 crc kubenswrapper[4767]: I0128 18:45:48.115643 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ngm5b" event={"ID":"e21010b1-73fb-4d7f-981a-a64a10495e7d","Type":"ContainerDied","Data":"54117e3e1f5a30b8fc751f572033940f7dac428dbde951d3118ab468c500d032"} Jan 28 18:45:48 crc kubenswrapper[4767]: I0128 18:45:48.123314 4767 generic.go:334] "Generic (PLEG): container finished" podID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" containerID="f462fee3dec64af7228d8f5b684769facfaf22c5a57d41e2265be96e400e8f82" exitCode=0 Jan 28 18:45:48 crc kubenswrapper[4767]: I0128 18:45:48.123395 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kw288" event={"ID":"cd131a5c-08d2-4025-a9bf-4aa957b02e2b","Type":"ContainerDied","Data":"f462fee3dec64af7228d8f5b684769facfaf22c5a57d41e2265be96e400e8f82"} Jan 28 18:45:48 crc kubenswrapper[4767]: I0128 18:45:48.254070 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:48 crc kubenswrapper[4767]: I0128 18:45:48.254129 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:48 crc kubenswrapper[4767]: I0128 18:45:48.298071 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:49 crc kubenswrapper[4767]: I0128 18:45:49.131903 4767 generic.go:334] "Generic (PLEG): container finished" podID="e21010b1-73fb-4d7f-981a-a64a10495e7d" containerID="e79f570cb5ecc675d4cce31e998758f75b057c2b1f29a64db1e4fb701dfda069" exitCode=0 Jan 28 18:45:49 crc kubenswrapper[4767]: I0128 18:45:49.132005 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ngm5b" event={"ID":"e21010b1-73fb-4d7f-981a-a64a10495e7d","Type":"ContainerDied","Data":"e79f570cb5ecc675d4cce31e998758f75b057c2b1f29a64db1e4fb701dfda069"} Jan 28 18:45:49 crc kubenswrapper[4767]: I0128 18:45:49.195664 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:50 crc kubenswrapper[4767]: I0128 18:45:50.148410 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ngm5b" event={"ID":"e21010b1-73fb-4d7f-981a-a64a10495e7d","Type":"ContainerStarted","Data":"1481a401ac3badf0b540abb25b8f0cb6c71d4123d27882f2cfff76129d9988eb"} Jan 28 18:45:50 crc kubenswrapper[4767]: I0128 18:45:50.148956 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ngm5b" event={"ID":"e21010b1-73fb-4d7f-981a-a64a10495e7d","Type":"ContainerStarted","Data":"e8f93612632a55f0bf5759426b9804826919d16af76344e6e3395ad42adc7269"} Jan 28 18:45:50 crc kubenswrapper[4767]: I0128 18:45:50.148972 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ngm5b" event={"ID":"e21010b1-73fb-4d7f-981a-a64a10495e7d","Type":"ContainerStarted","Data":"bad037afa0de14ed71b9f0361c1a6c6dfaa91d98e488839ddf29d97c13f413ca"} Jan 28 18:45:50 crc kubenswrapper[4767]: I0128 18:45:50.148982 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ngm5b" event={"ID":"e21010b1-73fb-4d7f-981a-a64a10495e7d","Type":"ContainerStarted","Data":"c034281278c1af33902c4037870bbea6b018491926ac709b7e6209ee3be8149d"} Jan 28 18:45:50 crc kubenswrapper[4767]: I0128 
18:45:50.148993 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ngm5b" event={"ID":"e21010b1-73fb-4d7f-981a-a64a10495e7d","Type":"ContainerStarted","Data":"71a957f2ca58fc7d4bb7823009cebdc621c8796988add4078843d2d649b72f57"} Jan 28 18:45:51 crc kubenswrapper[4767]: I0128 18:45:51.159585 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-ngm5b" event={"ID":"e21010b1-73fb-4d7f-981a-a64a10495e7d","Type":"ContainerStarted","Data":"c2230e6242a4f6d1c8b4f5bf3f71a70b084126b236631f2acb970831735e3564"} Jan 28 18:45:51 crc kubenswrapper[4767]: I0128 18:45:51.159820 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:51 crc kubenswrapper[4767]: I0128 18:45:51.182728 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-ngm5b" podStartSLOduration=5.890060164 podStartE2EDuration="16.182690055s" podCreationTimestamp="2026-01-28 18:45:35 +0000 UTC" firstStartedPulling="2026-01-28 18:45:35.812800122 +0000 UTC m=+941.776982996" lastFinishedPulling="2026-01-28 18:45:46.105430013 +0000 UTC m=+952.069612887" observedRunningTime="2026-01-28 18:45:51.180574229 +0000 UTC m=+957.144757103" watchObservedRunningTime="2026-01-28 18:45:51.182690055 +0000 UTC m=+957.146872929" Jan 28 18:45:51 crc kubenswrapper[4767]: I0128 18:45:51.712721 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-czlpn"] Jan 28 18:45:51 crc kubenswrapper[4767]: I0128 18:45:51.713066 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-czlpn" podUID="41287a5e-8840-40e2-914e-a0b7bb92ecee" containerName="registry-server" containerID="cri-o://86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce" gracePeriod=2 Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.127236 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.169866 4767 generic.go:334] "Generic (PLEG): container finished" podID="41287a5e-8840-40e2-914e-a0b7bb92ecee" containerID="86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce" exitCode=0 Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.169958 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-czlpn" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.169964 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-czlpn" event={"ID":"41287a5e-8840-40e2-914e-a0b7bb92ecee","Type":"ContainerDied","Data":"86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce"} Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.170104 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-czlpn" event={"ID":"41287a5e-8840-40e2-914e-a0b7bb92ecee","Type":"ContainerDied","Data":"f4435662ce1ad9278eacf1ebdee1136ef07462556f646dabacb03b3e069ebe16"} Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.170127 4767 scope.go:117] "RemoveContainer" containerID="86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.182384 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41287a5e-8840-40e2-914e-a0b7bb92ecee-catalog-content\") pod \"41287a5e-8840-40e2-914e-a0b7bb92ecee\" (UID: \"41287a5e-8840-40e2-914e-a0b7bb92ecee\") " Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.182463 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7kz5\" (UniqueName: \"kubernetes.io/projected/41287a5e-8840-40e2-914e-a0b7bb92ecee-kube-api-access-m7kz5\") pod \"41287a5e-8840-40e2-914e-a0b7bb92ecee\" (UID: \"41287a5e-8840-40e2-914e-a0b7bb92ecee\") " Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.182520 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41287a5e-8840-40e2-914e-a0b7bb92ecee-utilities\") pod \"41287a5e-8840-40e2-914e-a0b7bb92ecee\" (UID: \"41287a5e-8840-40e2-914e-a0b7bb92ecee\") " Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.184107 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41287a5e-8840-40e2-914e-a0b7bb92ecee-utilities" (OuterVolumeSpecName: "utilities") pod "41287a5e-8840-40e2-914e-a0b7bb92ecee" (UID: "41287a5e-8840-40e2-914e-a0b7bb92ecee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.189528 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/41287a5e-8840-40e2-914e-a0b7bb92ecee-kube-api-access-m7kz5" (OuterVolumeSpecName: "kube-api-access-m7kz5") pod "41287a5e-8840-40e2-914e-a0b7bb92ecee" (UID: "41287a5e-8840-40e2-914e-a0b7bb92ecee"). InnerVolumeSpecName "kube-api-access-m7kz5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.191440 4767 scope.go:117] "RemoveContainer" containerID="33cf144942cb01836f0f42730eb353c6fbedda383ccc8f360ad95b83a193cc0e" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.226530 4767 scope.go:117] "RemoveContainer" containerID="afa3c5e633084b99c5fda6bd5bff08a51826c49652f33025914c3e0b9c7efdd5" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.243127 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/41287a5e-8840-40e2-914e-a0b7bb92ecee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "41287a5e-8840-40e2-914e-a0b7bb92ecee" (UID: "41287a5e-8840-40e2-914e-a0b7bb92ecee"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.246403 4767 scope.go:117] "RemoveContainer" containerID="86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce" Jan 28 18:45:52 crc kubenswrapper[4767]: E0128 18:45:52.247707 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce\": container with ID starting with 86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce not found: ID does not exist" containerID="86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.247823 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce"} err="failed to get container status \"86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce\": rpc error: code = NotFound desc = could not find container \"86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce\": container with ID starting with 86cc84c19c5f41d9fcac3904aafd5efd7745b88a1538bc3b1f00227acca736ce not found: ID does not exist" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.250907 4767 scope.go:117] "RemoveContainer" containerID="33cf144942cb01836f0f42730eb353c6fbedda383ccc8f360ad95b83a193cc0e" Jan 28 18:45:52 crc kubenswrapper[4767]: E0128 18:45:52.252172 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"33cf144942cb01836f0f42730eb353c6fbedda383ccc8f360ad95b83a193cc0e\": container with ID starting with 33cf144942cb01836f0f42730eb353c6fbedda383ccc8f360ad95b83a193cc0e not found: ID does not exist" containerID="33cf144942cb01836f0f42730eb353c6fbedda383ccc8f360ad95b83a193cc0e" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.252292 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"33cf144942cb01836f0f42730eb353c6fbedda383ccc8f360ad95b83a193cc0e"} err="failed to get container status \"33cf144942cb01836f0f42730eb353c6fbedda383ccc8f360ad95b83a193cc0e\": rpc error: code = NotFound desc = could not find container \"33cf144942cb01836f0f42730eb353c6fbedda383ccc8f360ad95b83a193cc0e\": container with ID starting with 33cf144942cb01836f0f42730eb353c6fbedda383ccc8f360ad95b83a193cc0e not found: ID does not exist" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.252391 4767 scope.go:117] "RemoveContainer" containerID="afa3c5e633084b99c5fda6bd5bff08a51826c49652f33025914c3e0b9c7efdd5" Jan 28 18:45:52 crc kubenswrapper[4767]: 
E0128 18:45:52.252768 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"afa3c5e633084b99c5fda6bd5bff08a51826c49652f33025914c3e0b9c7efdd5\": container with ID starting with afa3c5e633084b99c5fda6bd5bff08a51826c49652f33025914c3e0b9c7efdd5 not found: ID does not exist" containerID="afa3c5e633084b99c5fda6bd5bff08a51826c49652f33025914c3e0b9c7efdd5" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.252871 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"afa3c5e633084b99c5fda6bd5bff08a51826c49652f33025914c3e0b9c7efdd5"} err="failed to get container status \"afa3c5e633084b99c5fda6bd5bff08a51826c49652f33025914c3e0b9c7efdd5\": rpc error: code = NotFound desc = could not find container \"afa3c5e633084b99c5fda6bd5bff08a51826c49652f33025914c3e0b9c7efdd5\": container with ID starting with afa3c5e633084b99c5fda6bd5bff08a51826c49652f33025914c3e0b9c7efdd5 not found: ID does not exist" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.285097 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/41287a5e-8840-40e2-914e-a0b7bb92ecee-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.285146 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7kz5\" (UniqueName: \"kubernetes.io/projected/41287a5e-8840-40e2-914e-a0b7bb92ecee-kube-api-access-m7kz5\") on node \"crc\" DevicePath \"\"" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.285158 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/41287a5e-8840-40e2-914e-a0b7bb92ecee-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.504967 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-czlpn"] Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.510515 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-czlpn"] Jan 28 18:45:52 crc kubenswrapper[4767]: I0128 18:45:52.805649 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="41287a5e-8840-40e2-914e-a0b7bb92ecee" path="/var/lib/kubelet/pods/41287a5e-8840-40e2-914e-a0b7bb92ecee/volumes" Jan 28 18:45:54 crc kubenswrapper[4767]: I0128 18:45:54.189404 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kw288" event={"ID":"cd131a5c-08d2-4025-a9bf-4aa957b02e2b","Type":"ContainerStarted","Data":"79feb70e73c1e8a2428a314d4360ac14d13c11dbdc8e17042d5a036f8a244278"} Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.203549 4767 generic.go:334] "Generic (PLEG): container finished" podID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" containerID="79feb70e73c1e8a2428a314d4360ac14d13c11dbdc8e17042d5a036f8a244278" exitCode=0 Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.203764 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kw288" event={"ID":"cd131a5c-08d2-4025-a9bf-4aa957b02e2b","Type":"ContainerDied","Data":"79feb70e73c1e8a2428a314d4360ac14d13c11dbdc8e17042d5a036f8a244278"} Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.619235 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-ngm5b" Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 
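
NOTE: the E-level "ContainerStatus from runtime service failed ... NotFound" entries above are the benign tail of pod deletion: the kubelet issues RemoveContainer for IDs the runtime has already pruned, and CRI-O answers with gRPC NotFound, which the deletor then just reports. A minimal sketch of the usual pattern for making such cleanup idempotent, assuming grpc-go; this is an illustration, not the kubelet's actual code:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ignoreNotFound treats gRPC NotFound as success so that removing an
// already-removed container (as in the entries above) stays idempotent.
func ignoreNotFound(err error) error {
	if status.Code(err) == codes.NotFound {
		return nil
	}
	return err
}

func main() {
	// Simulate the runtime's answer from the log above.
	err := status.Error(codes.NotFound, "could not find container")
	fmt.Println(ignoreNotFound(err)) // <nil>: cleanup proceeds without error
}
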
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.729103 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-d78gr"]
Jan 28 18:45:55 crc kubenswrapper[4767]: E0128 18:45:55.729414 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41287a5e-8840-40e2-914e-a0b7bb92ecee" containerName="extract-utilities"
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.729427 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="41287a5e-8840-40e2-914e-a0b7bb92ecee" containerName="extract-utilities"
Jan 28 18:45:55 crc kubenswrapper[4767]: E0128 18:45:55.729449 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41287a5e-8840-40e2-914e-a0b7bb92ecee" containerName="registry-server"
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.729455 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="41287a5e-8840-40e2-914e-a0b7bb92ecee" containerName="registry-server"
Jan 28 18:45:55 crc kubenswrapper[4767]: E0128 18:45:55.729465 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="41287a5e-8840-40e2-914e-a0b7bb92ecee" containerName="extract-content"
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.729472 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="41287a5e-8840-40e2-914e-a0b7bb92ecee" containerName="extract-content"
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.729602 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="41287a5e-8840-40e2-914e-a0b7bb92ecee" containerName="registry-server"
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.730022 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-d78gr"
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.733790 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt"
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.733995 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt"
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.734115 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-gmsnr"
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.757186 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-d78gr"]
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.799931 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-lzbd2"
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.850084 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2jzbp\" (UniqueName: \"kubernetes.io/projected/5c19930e-137e-402d-90f2-a25b9ff8117c-kube-api-access-2jzbp\") pod \"openstack-operator-index-d78gr\" (UID: \"5c19930e-137e-402d-90f2-a25b9ff8117c\") " pod="openstack-operators/openstack-operator-index-d78gr"
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.952246 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2jzbp\" (UniqueName: \"kubernetes.io/projected/5c19930e-137e-402d-90f2-a25b9ff8117c-kube-api-access-2jzbp\") pod \"openstack-operator-index-d78gr\" (UID: \"5c19930e-137e-402d-90f2-a25b9ff8117c\") " pod="openstack-operators/openstack-operator-index-d78gr"
Jan 28 18:45:55 crc kubenswrapper[4767]: I0128 18:45:55.976740 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2jzbp\" (UniqueName: \"kubernetes.io/projected/5c19930e-137e-402d-90f2-a25b9ff8117c-kube-api-access-2jzbp\") pod \"openstack-operator-index-d78gr\" (UID: \"5c19930e-137e-402d-90f2-a25b9ff8117c\") " pod="openstack-operators/openstack-operator-index-d78gr"
Jan 28 18:45:56 crc kubenswrapper[4767]: I0128 18:45:56.066907 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-d78gr"
Jan 28 18:45:56 crc kubenswrapper[4767]: I0128 18:45:56.226464 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kw288" event={"ID":"cd131a5c-08d2-4025-a9bf-4aa957b02e2b","Type":"ContainerStarted","Data":"2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f"}
Jan 28 18:45:56 crc kubenswrapper[4767]: I0128 18:45:56.242498 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-584q6"
Jan 28 18:45:56 crc kubenswrapper[4767]: I0128 18:45:56.317690 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-kw288" podStartSLOduration=2.6174677060000002 podStartE2EDuration="10.317661866s" podCreationTimestamp="2026-01-28 18:45:46 +0000 UTC" firstStartedPulling="2026-01-28 18:45:48.124719434 +0000 UTC m=+954.088902308" lastFinishedPulling="2026-01-28 18:45:55.824913594 +0000 UTC m=+961.789096468" observedRunningTime="2026-01-28 18:45:56.312517765 +0000 UTC m=+962.276700639" watchObservedRunningTime="2026-01-28 18:45:56.317661866 +0000 UTC m=+962.281844740"
Jan 28 18:45:56 crc kubenswrapper[4767]: I0128 18:45:56.352298 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-d78gr"]
Jan 28 18:45:56 crc kubenswrapper[4767]: I0128 18:45:56.562174 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kw288"
Jan 28 18:45:56 crc kubenswrapper[4767]: I0128 18:45:56.562282 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kw288"
Jan 28 18:45:57 crc kubenswrapper[4767]: I0128 18:45:57.235616 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d78gr" event={"ID":"5c19930e-137e-402d-90f2-a25b9ff8117c","Type":"ContainerStarted","Data":"e7c1738a9b4a92c15cc3ebd3d2932b461268ac5d1037e464411623b4b996008d"}
Jan 28 18:45:57 crc kubenswrapper[4767]: I0128 18:45:57.618414 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-kw288" podUID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" containerName="registry-server" probeResult="failure" output=<
Jan 28 18:45:57 crc kubenswrapper[4767]: timeout: failed to connect service ":50051" within 1s
Jan 28 18:45:57 crc kubenswrapper[4767]: >
Jan 28 18:46:00 crc kubenswrapper[4767]: I0128 18:46:00.259252 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-d78gr" event={"ID":"5c19930e-137e-402d-90f2-a25b9ff8117c","Type":"ContainerStarted","Data":"09aa15a4759e1e5bb327b02eb3f345c81d6e487061e77e571a7f30091a0e01b3"}
Jan 28 18:46:00 crc kubenswrapper[4767]: I0128 18:46:00.285306 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-d78gr" podStartSLOduration=2.20520803 podStartE2EDuration="5.285271104s" podCreationTimestamp="2026-01-28 18:45:55 +0000 UTC" firstStartedPulling="2026-01-28 18:45:56.369235053 +0000 UTC m=+962.333417927" lastFinishedPulling="2026-01-28 18:45:59.449298127 +0000 UTC m=+965.413481001" observedRunningTime="2026-01-28 18:46:00.278937815 +0000 UTC m=+966.243120699" watchObservedRunningTime="2026-01-28 18:46:00.285271104 +0000 UTC m=+966.249453988"
Jan 28 18:46:05 crc kubenswrapper[4767]: I0128 18:46:05.622467 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-ngm5b"
Jan 28 18:46:06 crc kubenswrapper[4767]: I0128 18:46:06.067475 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-d78gr"
Jan 28 18:46:06 crc kubenswrapper[4767]: I0128 18:46:06.067579 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-d78gr"
Jan 28 18:46:06 crc kubenswrapper[4767]: I0128 18:46:06.104867 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-d78gr"
Jan 28 18:46:06 crc kubenswrapper[4767]: I0128 18:46:06.337501 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-d78gr"
Jan 28 18:46:06 crc kubenswrapper[4767]: I0128 18:46:06.627344 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kw288"
Jan 28 18:46:06 crc kubenswrapper[4767]: I0128 18:46:06.671535 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kw288"
Jan 28 18:46:08 crc kubenswrapper[4767]: I0128 18:46:08.760928 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"]
Jan 28 18:46:08 crc kubenswrapper[4767]: I0128 18:46:08.763241 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"
Jan 28 18:46:08 crc kubenswrapper[4767]: I0128 18:46:08.766307 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-j49wc"
Jan 28 18:46:08 crc kubenswrapper[4767]: I0128 18:46:08.774369 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"]
Jan 28 18:46:08 crc kubenswrapper[4767]: I0128 18:46:08.919284 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/661846e5-b7a9-457e-99fe-86e94f07dda7-bundle\") pod \"a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc\" (UID: \"661846e5-b7a9-457e-99fe-86e94f07dda7\") " pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"
Jan 28 18:46:08 crc kubenswrapper[4767]: I0128 18:46:08.919624 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/661846e5-b7a9-457e-99fe-86e94f07dda7-util\") pod \"a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc\" (UID: \"661846e5-b7a9-457e-99fe-86e94f07dda7\") " pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"
Jan 28 18:46:08 crc kubenswrapper[4767]: I0128 18:46:08.919796 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8p76l\" (UniqueName: \"kubernetes.io/projected/661846e5-b7a9-457e-99fe-86e94f07dda7-kube-api-access-8p76l\") pod \"a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc\" (UID: \"661846e5-b7a9-457e-99fe-86e94f07dda7\") " pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"
Jan 28 18:46:09 crc kubenswrapper[4767]: I0128 18:46:09.021794 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8p76l\" (UniqueName: \"kubernetes.io/projected/661846e5-b7a9-457e-99fe-86e94f07dda7-kube-api-access-8p76l\") pod \"a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc\" (UID: \"661846e5-b7a9-457e-99fe-86e94f07dda7\") " pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"
Jan 28 18:46:09 crc kubenswrapper[4767]: I0128 18:46:09.021911 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/661846e5-b7a9-457e-99fe-86e94f07dda7-bundle\") pod \"a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc\" (UID: \"661846e5-b7a9-457e-99fe-86e94f07dda7\") " pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"
Jan 28 18:46:09 crc kubenswrapper[4767]: I0128 18:46:09.021939 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/661846e5-b7a9-457e-99fe-86e94f07dda7-util\") pod \"a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc\" (UID: \"661846e5-b7a9-457e-99fe-86e94f07dda7\") " pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"
Jan 28 18:46:09 crc kubenswrapper[4767]: I0128 18:46:09.022527 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/661846e5-b7a9-457e-99fe-86e94f07dda7-util\") pod \"a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc\" (UID: \"661846e5-b7a9-457e-99fe-86e94f07dda7\") " pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"
Jan 28 18:46:09 crc kubenswrapper[4767]: I0128 18:46:09.022613 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/661846e5-b7a9-457e-99fe-86e94f07dda7-bundle\") pod \"a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc\" (UID: \"661846e5-b7a9-457e-99fe-86e94f07dda7\") " pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"
Jan 28 18:46:09 crc kubenswrapper[4767]: I0128 18:46:09.042192 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8p76l\" (UniqueName: \"kubernetes.io/projected/661846e5-b7a9-457e-99fe-86e94f07dda7-kube-api-access-8p76l\") pod \"a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc\" (UID: \"661846e5-b7a9-457e-99fe-86e94f07dda7\") " pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"
Jan 28 18:46:09 crc kubenswrapper[4767]: I0128 18:46:09.090978 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"
Jan 28 18:46:09 crc kubenswrapper[4767]: I0128 18:46:09.302112 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"]
Jan 28 18:46:09 crc kubenswrapper[4767]: I0128 18:46:09.328621 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc" event={"ID":"661846e5-b7a9-457e-99fe-86e94f07dda7","Type":"ContainerStarted","Data":"5b4c5c163b9f36e63b7e679b1491c54402d1012a0cb8fe30fbecebee937469f0"}
Jan 28 18:46:10 crc kubenswrapper[4767]: I0128 18:46:10.312275 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kw288"]
Jan 28 18:46:10 crc kubenswrapper[4767]: I0128 18:46:10.312829 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kw288" podUID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" containerName="registry-server" containerID="cri-o://2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f" gracePeriod=2
Jan 28 18:46:10 crc kubenswrapper[4767]: I0128 18:46:10.337689 4767 generic.go:334] "Generic (PLEG): container finished" podID="661846e5-b7a9-457e-99fe-86e94f07dda7" containerID="02d4d851ae0f3c5b79c537ac8a23d70375390b4629700f98b6eeb87e80cf0490" exitCode=0
Jan 28 18:46:10 crc kubenswrapper[4767]: I0128 18:46:10.337738 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc" event={"ID":"661846e5-b7a9-457e-99fe-86e94f07dda7","Type":"ContainerDied","Data":"02d4d851ae0f3c5b79c537ac8a23d70375390b4629700f98b6eeb87e80cf0490"}
Jan 28 18:46:10 crc kubenswrapper[4767]: I0128 18:46:10.816336 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kw288"
Jan 28 18:46:10 crc kubenswrapper[4767]: I0128 18:46:10.950717 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ds6mx\" (UniqueName: \"kubernetes.io/projected/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-kube-api-access-ds6mx\") pod \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\" (UID: \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\") "
Jan 28 18:46:10 crc kubenswrapper[4767]: I0128 18:46:10.950805 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-catalog-content\") pod \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\" (UID: \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\") "
Jan 28 18:46:10 crc kubenswrapper[4767]: I0128 18:46:10.950941 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-utilities\") pod \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\" (UID: \"cd131a5c-08d2-4025-a9bf-4aa957b02e2b\") "
Jan 28 18:46:10 crc kubenswrapper[4767]: I0128 18:46:10.952770 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-utilities" (OuterVolumeSpecName: "utilities") pod "cd131a5c-08d2-4025-a9bf-4aa957b02e2b" (UID: "cd131a5c-08d2-4025-a9bf-4aa957b02e2b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:46:10 crc kubenswrapper[4767]: I0128 18:46:10.960503 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-kube-api-access-ds6mx" (OuterVolumeSpecName: "kube-api-access-ds6mx") pod "cd131a5c-08d2-4025-a9bf-4aa957b02e2b" (UID: "cd131a5c-08d2-4025-a9bf-4aa957b02e2b"). InnerVolumeSpecName "kube-api-access-ds6mx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.013141 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cd131a5c-08d2-4025-a9bf-4aa957b02e2b" (UID: "cd131a5c-08d2-4025-a9bf-4aa957b02e2b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.052833 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.052895 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ds6mx\" (UniqueName: \"kubernetes.io/projected/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-kube-api-access-ds6mx\") on node \"crc\" DevicePath \"\""
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.052908 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cd131a5c-08d2-4025-a9bf-4aa957b02e2b-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.348296 4767 generic.go:334] "Generic (PLEG): container finished" podID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" containerID="2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f" exitCode=0
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.348363 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kw288" event={"ID":"cd131a5c-08d2-4025-a9bf-4aa957b02e2b","Type":"ContainerDied","Data":"2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f"}
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.348418 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kw288" event={"ID":"cd131a5c-08d2-4025-a9bf-4aa957b02e2b","Type":"ContainerDied","Data":"52f3e399cbadc3c9c0563a653387d7e662d6fb982a0db1978174ca21bfe745d1"}
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.348438 4767 scope.go:117] "RemoveContainer" containerID="2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f"
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.348509 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kw288"
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.353522 4767 generic.go:334] "Generic (PLEG): container finished" podID="661846e5-b7a9-457e-99fe-86e94f07dda7" containerID="9fcc6f1451f2423f22346d5326036b2ab359cc25ffa2c210afed9d859e4a396f" exitCode=0
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.353566 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc" event={"ID":"661846e5-b7a9-457e-99fe-86e94f07dda7","Type":"ContainerDied","Data":"9fcc6f1451f2423f22346d5326036b2ab359cc25ffa2c210afed9d859e4a396f"}
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.377892 4767 scope.go:117] "RemoveContainer" containerID="79feb70e73c1e8a2428a314d4360ac14d13c11dbdc8e17042d5a036f8a244278"
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.404715 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kw288"]
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.413929 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kw288"]
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.434403 4767 scope.go:117] "RemoveContainer" containerID="f462fee3dec64af7228d8f5b684769facfaf22c5a57d41e2265be96e400e8f82"
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.460965 4767 scope.go:117] "RemoveContainer" containerID="2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f"
Jan 28 18:46:11 crc kubenswrapper[4767]: E0128 18:46:11.461677 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f\": container with ID starting with 2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f not found: ID does not exist" containerID="2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f"
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.461725 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f"} err="failed to get container status \"2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f\": rpc error: code = NotFound desc = could not find container \"2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f\": container with ID starting with 2bdd4d2eefe6f6373c34558a5743087613b51abd0baa151c5d328a725ff7d92f not found: ID does not exist"
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.461786 4767 scope.go:117] "RemoveContainer" containerID="79feb70e73c1e8a2428a314d4360ac14d13c11dbdc8e17042d5a036f8a244278"
Jan 28 18:46:11 crc kubenswrapper[4767]: E0128 18:46:11.462616 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79feb70e73c1e8a2428a314d4360ac14d13c11dbdc8e17042d5a036f8a244278\": container with ID starting with 79feb70e73c1e8a2428a314d4360ac14d13c11dbdc8e17042d5a036f8a244278 not found: ID does not exist" containerID="79feb70e73c1e8a2428a314d4360ac14d13c11dbdc8e17042d5a036f8a244278"
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.462647 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79feb70e73c1e8a2428a314d4360ac14d13c11dbdc8e17042d5a036f8a244278"} err="failed to get container status \"79feb70e73c1e8a2428a314d4360ac14d13c11dbdc8e17042d5a036f8a244278\": rpc error: code = NotFound desc = could not find container \"79feb70e73c1e8a2428a314d4360ac14d13c11dbdc8e17042d5a036f8a244278\": container with ID starting with 79feb70e73c1e8a2428a314d4360ac14d13c11dbdc8e17042d5a036f8a244278 not found: ID does not exist"
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.462664 4767 scope.go:117] "RemoveContainer" containerID="f462fee3dec64af7228d8f5b684769facfaf22c5a57d41e2265be96e400e8f82"
Jan 28 18:46:11 crc kubenswrapper[4767]: E0128 18:46:11.463259 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f462fee3dec64af7228d8f5b684769facfaf22c5a57d41e2265be96e400e8f82\": container with ID starting with f462fee3dec64af7228d8f5b684769facfaf22c5a57d41e2265be96e400e8f82 not found: ID does not exist" containerID="f462fee3dec64af7228d8f5b684769facfaf22c5a57d41e2265be96e400e8f82"
Jan 28 18:46:11 crc kubenswrapper[4767]: I0128 18:46:11.463323 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f462fee3dec64af7228d8f5b684769facfaf22c5a57d41e2265be96e400e8f82"} err="failed to get container status \"f462fee3dec64af7228d8f5b684769facfaf22c5a57d41e2265be96e400e8f82\": rpc error: code = NotFound desc = could not find container \"f462fee3dec64af7228d8f5b684769facfaf22c5a57d41e2265be96e400e8f82\": container with ID starting with f462fee3dec64af7228d8f5b684769facfaf22c5a57d41e2265be96e400e8f82 not found: ID does not exist"
Jan 28 18:46:12 crc kubenswrapper[4767]: I0128 18:46:12.363229 4767 generic.go:334] "Generic (PLEG): container finished" podID="661846e5-b7a9-457e-99fe-86e94f07dda7" containerID="12bd37b38d433ed78a999aa199bd8d635045e5121dad9e35c00f78a58eb15b6b" exitCode=0
Jan 28 18:46:12 crc kubenswrapper[4767]: I0128 18:46:12.363359 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc" event={"ID":"661846e5-b7a9-457e-99fe-86e94f07dda7","Type":"ContainerDied","Data":"12bd37b38d433ed78a999aa199bd8d635045e5121dad9e35c00f78a58eb15b6b"}
Jan 28 18:46:12 crc kubenswrapper[4767]: I0128 18:46:12.803474 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" path="/var/lib/kubelet/pods/cd131a5c-08d2-4025-a9bf-4aa957b02e2b/volumes"
Jan 28 18:46:13 crc kubenswrapper[4767]: I0128 18:46:13.635056 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc"
Need to start a new one" pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc" Jan 28 18:46:13 crc kubenswrapper[4767]: I0128 18:46:13.793797 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/661846e5-b7a9-457e-99fe-86e94f07dda7-bundle\") pod \"661846e5-b7a9-457e-99fe-86e94f07dda7\" (UID: \"661846e5-b7a9-457e-99fe-86e94f07dda7\") " Jan 28 18:46:13 crc kubenswrapper[4767]: I0128 18:46:13.793843 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/661846e5-b7a9-457e-99fe-86e94f07dda7-util\") pod \"661846e5-b7a9-457e-99fe-86e94f07dda7\" (UID: \"661846e5-b7a9-457e-99fe-86e94f07dda7\") " Jan 28 18:46:13 crc kubenswrapper[4767]: I0128 18:46:13.793873 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8p76l\" (UniqueName: \"kubernetes.io/projected/661846e5-b7a9-457e-99fe-86e94f07dda7-kube-api-access-8p76l\") pod \"661846e5-b7a9-457e-99fe-86e94f07dda7\" (UID: \"661846e5-b7a9-457e-99fe-86e94f07dda7\") " Jan 28 18:46:13 crc kubenswrapper[4767]: I0128 18:46:13.795018 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/661846e5-b7a9-457e-99fe-86e94f07dda7-bundle" (OuterVolumeSpecName: "bundle") pod "661846e5-b7a9-457e-99fe-86e94f07dda7" (UID: "661846e5-b7a9-457e-99fe-86e94f07dda7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:46:13 crc kubenswrapper[4767]: I0128 18:46:13.803478 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/661846e5-b7a9-457e-99fe-86e94f07dda7-kube-api-access-8p76l" (OuterVolumeSpecName: "kube-api-access-8p76l") pod "661846e5-b7a9-457e-99fe-86e94f07dda7" (UID: "661846e5-b7a9-457e-99fe-86e94f07dda7"). InnerVolumeSpecName "kube-api-access-8p76l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:46:13 crc kubenswrapper[4767]: I0128 18:46:13.809332 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/661846e5-b7a9-457e-99fe-86e94f07dda7-util" (OuterVolumeSpecName: "util") pod "661846e5-b7a9-457e-99fe-86e94f07dda7" (UID: "661846e5-b7a9-457e-99fe-86e94f07dda7"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:46:13 crc kubenswrapper[4767]: I0128 18:46:13.895947 4767 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/661846e5-b7a9-457e-99fe-86e94f07dda7-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:46:13 crc kubenswrapper[4767]: I0128 18:46:13.895998 4767 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/661846e5-b7a9-457e-99fe-86e94f07dda7-util\") on node \"crc\" DevicePath \"\"" Jan 28 18:46:13 crc kubenswrapper[4767]: I0128 18:46:13.896009 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8p76l\" (UniqueName: \"kubernetes.io/projected/661846e5-b7a9-457e-99fe-86e94f07dda7-kube-api-access-8p76l\") on node \"crc\" DevicePath \"\"" Jan 28 18:46:14 crc kubenswrapper[4767]: I0128 18:46:14.381793 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc" event={"ID":"661846e5-b7a9-457e-99fe-86e94f07dda7","Type":"ContainerDied","Data":"5b4c5c163b9f36e63b7e679b1491c54402d1012a0cb8fe30fbecebee937469f0"} Jan 28 18:46:14 crc kubenswrapper[4767]: I0128 18:46:14.381838 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5b4c5c163b9f36e63b7e679b1491c54402d1012a0cb8fe30fbecebee937469f0" Jan 28 18:46:14 crc kubenswrapper[4767]: I0128 18:46:14.381864 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc" Jan 28 18:46:15 crc kubenswrapper[4767]: I0128 18:46:15.456112 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:46:15 crc kubenswrapper[4767]: I0128 18:46:15.456815 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.136865 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk"] Jan 28 18:46:18 crc kubenswrapper[4767]: E0128 18:46:18.138507 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="661846e5-b7a9-457e-99fe-86e94f07dda7" containerName="util" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.138592 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="661846e5-b7a9-457e-99fe-86e94f07dda7" containerName="util" Jan 28 18:46:18 crc kubenswrapper[4767]: E0128 18:46:18.138665 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" containerName="extract-utilities" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.138743 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" containerName="extract-utilities" Jan 28 18:46:18 crc kubenswrapper[4767]: E0128 18:46:18.138810 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" 
containerName="registry-server" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.138883 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" containerName="registry-server" Jan 28 18:46:18 crc kubenswrapper[4767]: E0128 18:46:18.139046 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" containerName="extract-content" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.139122 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" containerName="extract-content" Jan 28 18:46:18 crc kubenswrapper[4767]: E0128 18:46:18.139199 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="661846e5-b7a9-457e-99fe-86e94f07dda7" containerName="extract" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.139283 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="661846e5-b7a9-457e-99fe-86e94f07dda7" containerName="extract" Jan 28 18:46:18 crc kubenswrapper[4767]: E0128 18:46:18.139372 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="661846e5-b7a9-457e-99fe-86e94f07dda7" containerName="pull" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.139439 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="661846e5-b7a9-457e-99fe-86e94f07dda7" containerName="pull" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.139637 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd131a5c-08d2-4025-a9bf-4aa957b02e2b" containerName="registry-server" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.139729 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="661846e5-b7a9-457e-99fe-86e94f07dda7" containerName="extract" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.140166 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.147185 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-znw6l" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.265219 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-glx76\" (UniqueName: \"kubernetes.io/projected/39734b99-2733-4aef-b88e-12a87340933c-kube-api-access-glx76\") pod \"openstack-operator-controller-init-84ff885fbb-jgrkk\" (UID: \"39734b99-2733-4aef-b88e-12a87340933c\") " pod="openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.281478 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk"] Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.366611 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-glx76\" (UniqueName: \"kubernetes.io/projected/39734b99-2733-4aef-b88e-12a87340933c-kube-api-access-glx76\") pod \"openstack-operator-controller-init-84ff885fbb-jgrkk\" (UID: \"39734b99-2733-4aef-b88e-12a87340933c\") " pod="openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.388448 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-glx76\" (UniqueName: \"kubernetes.io/projected/39734b99-2733-4aef-b88e-12a87340933c-kube-api-access-glx76\") pod \"openstack-operator-controller-init-84ff885fbb-jgrkk\" (UID: \"39734b99-2733-4aef-b88e-12a87340933c\") " pod="openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.460449 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk" Jan 28 18:46:18 crc kubenswrapper[4767]: I0128 18:46:18.788023 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk"] Jan 28 18:46:18 crc kubenswrapper[4767]: W0128 18:46:18.800556 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod39734b99_2733_4aef_b88e_12a87340933c.slice/crio-bcd74f66c965ccd8f577e1196a433cefd5b948075159d8fd2260697200c83b29 WatchSource:0}: Error finding container bcd74f66c965ccd8f577e1196a433cefd5b948075159d8fd2260697200c83b29: Status 404 returned error can't find the container with id bcd74f66c965ccd8f577e1196a433cefd5b948075159d8fd2260697200c83b29 Jan 28 18:46:19 crc kubenswrapper[4767]: I0128 18:46:19.418675 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk" event={"ID":"39734b99-2733-4aef-b88e-12a87340933c","Type":"ContainerStarted","Data":"bcd74f66c965ccd8f577e1196a433cefd5b948075159d8fd2260697200c83b29"} Jan 28 18:46:24 crc kubenswrapper[4767]: I0128 18:46:24.467197 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk" event={"ID":"39734b99-2733-4aef-b88e-12a87340933c","Type":"ContainerStarted","Data":"e3629775205570cca733435fdf471421dad89aa361693cf89f3046fadd75806c"} Jan 28 18:46:24 crc kubenswrapper[4767]: I0128 18:46:24.468317 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk" Jan 28 18:46:24 crc kubenswrapper[4767]: I0128 18:46:24.527185 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk" podStartSLOduration=1.636389061 podStartE2EDuration="6.527148281s" podCreationTimestamp="2026-01-28 18:46:18 +0000 UTC" firstStartedPulling="2026-01-28 18:46:18.803081921 +0000 UTC m=+984.767264795" lastFinishedPulling="2026-01-28 18:46:23.693841141 +0000 UTC m=+989.658024015" observedRunningTime="2026-01-28 18:46:24.521424702 +0000 UTC m=+990.485607606" watchObservedRunningTime="2026-01-28 18:46:24.527148281 +0000 UTC m=+990.491331155" Jan 28 18:46:28 crc kubenswrapper[4767]: I0128 18:46:28.463908 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk" Jan 28 18:46:45 crc kubenswrapper[4767]: I0128 18:46:45.455151 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:46:45 crc kubenswrapper[4767]: I0128 18:46:45.457972 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:46:45 crc kubenswrapper[4767]: I0128 18:46:45.458143 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
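[Editor's note] The "Observed pod startup duration" record a few entries up encodes a useful relationship: podStartE2EDuration is the watch-observed running time minus the creation timestamp, and podStartSLOduration is that figure minus the image-pull window (firstStartedPulling to lastFinishedPulling). A small Go check of the arithmetic against the logged values (the formula is inferred from how these numbers relate here, not quoted from a spec):

    package main

    import (
        "fmt"
        "time"
    )

    func mustParse(s string) time.Time {
        // Layout matches the log's timestamps with the m=+... monotonic suffix stripped.
        t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
        if err != nil {
            panic(err)
        }
        return t
    }

    func main() {
        created := mustParse("2026-01-28 18:46:18 +0000 UTC")
        firstPull := mustParse("2026-01-28 18:46:18.803081921 +0000 UTC")
        lastPull := mustParse("2026-01-28 18:46:23.693841141 +0000 UTC")
        observed := mustParse("2026-01-28 18:46:24.527148281 +0000 UTC") // watchObservedRunningTime

        e2e := observed.Sub(created)         // podStartE2EDuration
        slo := e2e - lastPull.Sub(firstPull) // E2E minus the image-pull window

        fmt.Println(e2e) // 6.527148281s, matching podStartE2EDuration
        fmt.Println(slo) // 1.636389061s, matching podStartSLOduration
    }

The 6.527148281s end-to-end figure minus the roughly 4.89s pull window gives exactly the logged 1.636389061 SLO duration.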
pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:46:45 crc kubenswrapper[4767]: I0128 18:46:45.459310 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3b61dfadd6f0461bd4bf84451309ddd45bb81f3a524162955ded57c40d87733d"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 18:46:45 crc kubenswrapper[4767]: I0128 18:46:45.459519 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://3b61dfadd6f0461bd4bf84451309ddd45bb81f3a524162955ded57c40d87733d" gracePeriod=600 Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.579164 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp"] Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.582753 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.592614 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-fwf2t" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.593061 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg"] Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.594464 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.598824 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-rzjwc" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.599395 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp"] Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.606720 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97w4d\" (UniqueName: \"kubernetes.io/projected/3355ea8c-1093-449e-9a8d-a4598f46242c-kube-api-access-97w4d\") pod \"cinder-operator-controller-manager-f6487bd57-9hvzg\" (UID: \"3355ea8c-1093-449e-9a8d-a4598f46242c\") " pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.606839 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c5b4h\" (UniqueName: \"kubernetes.io/projected/ccee4b83-1a09-4828-b370-5fb768476acc-kube-api-access-c5b4h\") pod \"barbican-operator-controller-manager-6bc7f4f4cf-xctgp\" (UID: \"ccee4b83-1a09-4828-b370-5fb768476acc\") " pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.622477 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq"] Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.623644 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.629695 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-5lxph" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.660308 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="3b61dfadd6f0461bd4bf84451309ddd45bb81f3a524162955ded57c40d87733d" exitCode=0 Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.660371 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"3b61dfadd6f0461bd4bf84451309ddd45bb81f3a524162955ded57c40d87733d"} Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.660408 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"efe49b1f3887d0c6654c94b4c1818b6bf7a2508307ca13c8afacae337561c559"} Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.660432 4767 scope.go:117] "RemoveContainer" containerID="89b3ad23204d4cf9cafe056f477e14abf29509dbae044fabcb7a013294397e92" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.665322 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp"] Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.666672 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.672965 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-6rlwl" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.686873 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg"] Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.708527 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hj25z\" (UniqueName: \"kubernetes.io/projected/4e17b611-fe05-4f69-b64e-b1abb213b297-kube-api-access-hj25z\") pod \"designate-operator-controller-manager-66dfbd6f5d-x8rlq\" (UID: \"4e17b611-fe05-4f69-b64e-b1abb213b297\") " pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.708609 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vd8tb\" (UniqueName: \"kubernetes.io/projected/e2672b52-b1cf-491e-8f76-46c22b19fbbf-kube-api-access-vd8tb\") pod \"glance-operator-controller-manager-6db5dbd896-kb8qp\" (UID: \"e2672b52-b1cf-491e-8f76-46c22b19fbbf\") " pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.708637 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c5b4h\" (UniqueName: \"kubernetes.io/projected/ccee4b83-1a09-4828-b370-5fb768476acc-kube-api-access-c5b4h\") pod \"barbican-operator-controller-manager-6bc7f4f4cf-xctgp\" (UID: \"ccee4b83-1a09-4828-b370-5fb768476acc\") " pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.708800 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97w4d\" (UniqueName: \"kubernetes.io/projected/3355ea8c-1093-449e-9a8d-a4598f46242c-kube-api-access-97w4d\") pod \"cinder-operator-controller-manager-f6487bd57-9hvzg\" (UID: \"3355ea8c-1093-449e-9a8d-a4598f46242c\") " pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.722609 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq"] Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.736363 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp"] Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.761873 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c5b4h\" (UniqueName: \"kubernetes.io/projected/ccee4b83-1a09-4828-b370-5fb768476acc-kube-api-access-c5b4h\") pod \"barbican-operator-controller-manager-6bc7f4f4cf-xctgp\" (UID: \"ccee4b83-1a09-4828-b370-5fb768476acc\") " pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.778076 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97w4d\" (UniqueName: \"kubernetes.io/projected/3355ea8c-1093-449e-9a8d-a4598f46242c-kube-api-access-97w4d\") pod 
\"cinder-operator-controller-manager-f6487bd57-9hvzg\" (UID: \"3355ea8c-1093-449e-9a8d-a4598f46242c\") " pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.791080 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp"] Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.792518 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.838726 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hj25z\" (UniqueName: \"kubernetes.io/projected/4e17b611-fe05-4f69-b64e-b1abb213b297-kube-api-access-hj25z\") pod \"designate-operator-controller-manager-66dfbd6f5d-x8rlq\" (UID: \"4e17b611-fe05-4f69-b64e-b1abb213b297\") " pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.839312 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vd8tb\" (UniqueName: \"kubernetes.io/projected/e2672b52-b1cf-491e-8f76-46c22b19fbbf-kube-api-access-vd8tb\") pod \"glance-operator-controller-manager-6db5dbd896-kb8qp\" (UID: \"e2672b52-b1cf-491e-8f76-46c22b19fbbf\") " pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.864315 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-7rctp" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.906673 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hj25z\" (UniqueName: \"kubernetes.io/projected/4e17b611-fe05-4f69-b64e-b1abb213b297-kube-api-access-hj25z\") pod \"designate-operator-controller-manager-66dfbd6f5d-x8rlq\" (UID: \"4e17b611-fe05-4f69-b64e-b1abb213b297\") " pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.911584 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7"] Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.913523 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m"] Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.913635 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.917358 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-gw49l" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.920955 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.928318 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vd8tb\" (UniqueName: \"kubernetes.io/projected/e2672b52-b1cf-491e-8f76-46c22b19fbbf-kube-api-access-vd8tb\") pod \"glance-operator-controller-manager-6db5dbd896-kb8qp\" (UID: \"e2672b52-b1cf-491e-8f76-46c22b19fbbf\") " pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.934075 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.958217 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp"] Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.959846 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.973583 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.973870 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-hbrmh" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.977931 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hgflt\" (UniqueName: \"kubernetes.io/projected/3bbfc3c2-f654-4728-8bc0-da11b96d4246-kube-api-access-hgflt\") pod \"heat-operator-controller-manager-587c6bfdcf-bhkrp\" (UID: \"3bbfc3c2-f654-4728-8bc0-da11b96d4246\") " pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.982491 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq" Jan 28 18:46:46 crc kubenswrapper[4767]: I0128 18:46:46.983055 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.007702 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-958664b5-4prpg"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.009090 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-958664b5-4prpg" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.012468 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.017423 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-7lwzf" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.053285 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-958664b5-4prpg"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.081790 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4lgf\" (UniqueName: \"kubernetes.io/projected/4129a635-943e-4417-8934-24c408083149-kube-api-access-x4lgf\") pod \"horizon-operator-controller-manager-5fb775575f-qcsg7\" (UID: \"4129a635-943e-4417-8934-24c408083149\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.081880 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hgflt\" (UniqueName: \"kubernetes.io/projected/3bbfc3c2-f654-4728-8bc0-da11b96d4246-kube-api-access-hgflt\") pod \"heat-operator-controller-manager-587c6bfdcf-bhkrp\" (UID: \"3bbfc3c2-f654-4728-8bc0-da11b96d4246\") " pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.081930 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert\") pod \"infra-operator-controller-manager-79955696d6-gkt4m\" (UID: \"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.081960 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxf8b\" (UniqueName: \"kubernetes.io/projected/e3793181-a73c-494b-ba12-a1a908a7d6f5-kube-api-access-nxf8b\") pod \"ironic-operator-controller-manager-958664b5-4prpg\" (UID: \"e3793181-a73c-494b-ba12-a1a908a7d6f5\") " pod="openstack-operators/ironic-operator-controller-manager-958664b5-4prpg" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.082008 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-799x8\" (UniqueName: \"kubernetes.io/projected/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-kube-api-access-799x8\") pod \"infra-operator-controller-manager-79955696d6-gkt4m\" (UID: \"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.108464 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.113889 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-6978b79747-l4575"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.116404 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hgflt\" (UniqueName: \"kubernetes.io/projected/3bbfc3c2-f654-4728-8bc0-da11b96d4246-kube-api-access-hgflt\") pod \"heat-operator-controller-manager-587c6bfdcf-bhkrp\" (UID: 
\"3bbfc3c2-f654-4728-8bc0-da11b96d4246\") " pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.117095 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-6978b79747-l4575" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.121295 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-qdzg8" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.169241 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-6978b79747-l4575"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.183661 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-799x8\" (UniqueName: \"kubernetes.io/projected/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-kube-api-access-799x8\") pod \"infra-operator-controller-manager-79955696d6-gkt4m\" (UID: \"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.183740 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4lgf\" (UniqueName: \"kubernetes.io/projected/4129a635-943e-4417-8934-24c408083149-kube-api-access-x4lgf\") pod \"horizon-operator-controller-manager-5fb775575f-qcsg7\" (UID: \"4129a635-943e-4417-8934-24c408083149\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.183809 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsrs4\" (UniqueName: \"kubernetes.io/projected/898fb09e-f084-45e3-88bd-7a67ef198bee-kube-api-access-xsrs4\") pod \"keystone-operator-controller-manager-6978b79747-l4575\" (UID: \"898fb09e-f084-45e3-88bd-7a67ef198bee\") " pod="openstack-operators/keystone-operator-controller-manager-6978b79747-l4575" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.183845 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert\") pod \"infra-operator-controller-manager-79955696d6-gkt4m\" (UID: \"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.183873 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxf8b\" (UniqueName: \"kubernetes.io/projected/e3793181-a73c-494b-ba12-a1a908a7d6f5-kube-api-access-nxf8b\") pod \"ironic-operator-controller-manager-958664b5-4prpg\" (UID: \"e3793181-a73c-494b-ba12-a1a908a7d6f5\") " pod="openstack-operators/ironic-operator-controller-manager-958664b5-4prpg" Jan 28 18:46:47 crc kubenswrapper[4767]: E0128 18:46:47.185479 4767 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 18:46:47 crc kubenswrapper[4767]: E0128 18:46:47.185549 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert podName:c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d nodeName:}" failed. 
No retries permitted until 2026-01-28 18:46:47.685520857 +0000 UTC m=+1013.649703731 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert") pod "infra-operator-controller-manager-79955696d6-gkt4m" (UID: "c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d") : secret "infra-operator-webhook-server-cert" not found Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.185811 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.208723 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-765668569f-98mkp"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.219167 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-765668569f-98mkp" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.220404 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4lgf\" (UniqueName: \"kubernetes.io/projected/4129a635-943e-4417-8934-24c408083149-kube-api-access-x4lgf\") pod \"horizon-operator-controller-manager-5fb775575f-qcsg7\" (UID: \"4129a635-943e-4417-8934-24c408083149\") " pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.224074 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-799x8\" (UniqueName: \"kubernetes.io/projected/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-kube-api-access-799x8\") pod \"infra-operator-controller-manager-79955696d6-gkt4m\" (UID: \"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.224424 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-6rvmh" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.233475 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxf8b\" (UniqueName: \"kubernetes.io/projected/e3793181-a73c-494b-ba12-a1a908a7d6f5-kube-api-access-nxf8b\") pod \"ironic-operator-controller-manager-958664b5-4prpg\" (UID: \"e3793181-a73c-494b-ba12-a1a908a7d6f5\") " pod="openstack-operators/ironic-operator-controller-manager-958664b5-4prpg" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.243435 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-765668569f-98mkp"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.257855 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.260840 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.269476 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-dms4j" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.289040 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsrs4\" (UniqueName: \"kubernetes.io/projected/898fb09e-f084-45e3-88bd-7a67ef198bee-kube-api-access-xsrs4\") pod \"keystone-operator-controller-manager-6978b79747-l4575\" (UID: \"898fb09e-f084-45e3-88bd-7a67ef198bee\") " pod="openstack-operators/keystone-operator-controller-manager-6978b79747-l4575" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.289180 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zftc\" (UniqueName: \"kubernetes.io/projected/b48deaef-0712-425b-8d49-133c4931ea06-kube-api-access-6zftc\") pod \"manila-operator-controller-manager-765668569f-98mkp\" (UID: \"b48deaef-0712-425b-8d49-133c4931ea06\") " pod="openstack-operators/manila-operator-controller-manager-765668569f-98mkp" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.291309 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.292632 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.295930 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-2lpqr" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.299574 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.317297 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.322707 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsrs4\" (UniqueName: \"kubernetes.io/projected/898fb09e-f084-45e3-88bd-7a67ef198bee-kube-api-access-xsrs4\") pod \"keystone-operator-controller-manager-6978b79747-l4575\" (UID: \"898fb09e-f084-45e3-88bd-7a67ef198bee\") " pod="openstack-operators/keystone-operator-controller-manager-6978b79747-l4575" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.336859 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.350134 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.358437 4767 util.go:30] "No sandbox for pod can be found. 
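[Editor's note] Everything in this stretch funnels through the kubelet's single sync loop: "SyncLoop ADD"/"SyncLoop UPDATE" arrive from the API source, "SyncLoop (PLEG)" from the runtime's pod-lifecycle event generator, and "SyncLoop (probe)" from the prober, with the per-volume VerifyControllerAttachedVolume / MountVolume started / MountVolume.SetUp succeeded triplets then playing out for each pod. A toy version of that event multiplexing, with channel and struct types invented for illustration:

    package main

    import "fmt"

    // podEvent is a toy stand-in for the kubelet's per-source event types.
    type podEvent struct {
        op  string
        pod string
    }

    // syncLoop multiplexes the three event sources seen in this log:
    // the API server, the PLEG, and the prober. It consumes n events.
    func syncLoop(api, pleg, probes <-chan podEvent, n int) {
        for i := 0; i < n; i++ {
            select {
            case e := <-api:
                fmt.Printf("SyncLoop %s source=\"api\" pods=[%q]\n", e.op, e.pod)
            case e := <-pleg:
                fmt.Printf("SyncLoop (PLEG): event for pod %q: %s\n", e.pod, e.op)
            case e := <-probes:
                fmt.Printf("SyncLoop (probe) probe=%q pod=%q\n", e.op, e.pod)
            }
        }
    }

    func main() {
        api := make(chan podEvent, 2)
        pleg := make(chan podEvent, 1)
        probes := make(chan podEvent, 1)
        api <- podEvent{"ADD", "openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5"}
        api <- podEvent{"UPDATE", "openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9"}
        pleg <- podEvent{"ContainerStarted", "openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk"}
        probes <- podEvent{"readiness", "openstack-operators/openstack-operator-controller-init-84ff885fbb-jgrkk"}
        syncLoop(api, pleg, probes, 4)
    }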
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.364005 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-wnc8r" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.382038 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.392271 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r7r27\" (UniqueName: \"kubernetes.io/projected/14052357-c381-4e6b-ad51-13179cd09877-kube-api-access-r7r27\") pod \"mariadb-operator-controller-manager-67bf948998-222q9\" (UID: \"14052357-c381-4e6b-ad51-13179cd09877\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.392395 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gftgg\" (UniqueName: \"kubernetes.io/projected/58b8adeb-7f68-48a4-a8b3-5e93b6b93ec8-kube-api-access-gftgg\") pod \"neutron-operator-controller-manager-694c5bfc85-vsdzj\" (UID: \"58b8adeb-7f68-48a4-a8b3-5e93b6b93ec8\") " pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.392456 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zftc\" (UniqueName: \"kubernetes.io/projected/b48deaef-0712-425b-8d49-133c4931ea06-kube-api-access-6zftc\") pod \"manila-operator-controller-manager-765668569f-98mkp\" (UID: \"b48deaef-0712-425b-8d49-133c4931ea06\") " pod="openstack-operators/manila-operator-controller-manager-765668569f-98mkp" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.411502 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.412719 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.421232 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-hwthc" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.422827 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-958664b5-4prpg" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.432830 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zftc\" (UniqueName: \"kubernetes.io/projected/b48deaef-0712-425b-8d49-133c4931ea06-kube-api-access-6zftc\") pod \"manila-operator-controller-manager-765668569f-98mkp\" (UID: \"b48deaef-0712-425b-8d49-133c4931ea06\") " pod="openstack-operators/manila-operator-controller-manager-765668569f-98mkp" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.450414 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.467193 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-6978b79747-l4575" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.486607 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.487610 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.491280 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-p4t84" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.497320 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.498368 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r7r27\" (UniqueName: \"kubernetes.io/projected/14052357-c381-4e6b-ad51-13179cd09877-kube-api-access-r7r27\") pod \"mariadb-operator-controller-manager-67bf948998-222q9\" (UID: \"14052357-c381-4e6b-ad51-13179cd09877\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.498488 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gftgg\" (UniqueName: \"kubernetes.io/projected/58b8adeb-7f68-48a4-a8b3-5e93b6b93ec8-kube-api-access-gftgg\") pod \"neutron-operator-controller-manager-694c5bfc85-vsdzj\" (UID: \"58b8adeb-7f68-48a4-a8b3-5e93b6b93ec8\") " pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.498541 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.498771 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7nsw\" (UniqueName: \"kubernetes.io/projected/fbe04280-2c8a-4f54-9442-26fb1f381358-kube-api-access-v7nsw\") pod \"octavia-operator-controller-manager-5c765b4558-v7nnq\" (UID: \"fbe04280-2c8a-4f54-9442-26fb1f381358\") " pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.498833 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8jzv\" (UniqueName: \"kubernetes.io/projected/a0962425-fb2a-4acc-966b-544669cd2dc6-kube-api-access-d8jzv\") pod \"nova-operator-controller-manager-ddcbfd695-mrhz5\" (UID: \"a0962425-fb2a-4acc-966b-544669cd2dc6\") " pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.500866 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-szshm" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.501036 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.549659 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r7r27\" (UniqueName: \"kubernetes.io/projected/14052357-c381-4e6b-ad51-13179cd09877-kube-api-access-r7r27\") pod \"mariadb-operator-controller-manager-67bf948998-222q9\" (UID: \"14052357-c381-4e6b-ad51-13179cd09877\") " pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.551072 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gftgg\" (UniqueName: \"kubernetes.io/projected/58b8adeb-7f68-48a4-a8b3-5e93b6b93ec8-kube-api-access-gftgg\") pod \"neutron-operator-controller-manager-694c5bfc85-vsdzj\" (UID: \"58b8adeb-7f68-48a4-a8b3-5e93b6b93ec8\") " pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.560335 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-765668569f-98mkp" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.563394 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.582911 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.599032 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.602034 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7nsw\" (UniqueName: \"kubernetes.io/projected/fbe04280-2c8a-4f54-9442-26fb1f381358-kube-api-access-v7nsw\") pod \"octavia-operator-controller-manager-5c765b4558-v7nnq\" (UID: \"fbe04280-2c8a-4f54-9442-26fb1f381358\") " pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.602086 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8jzv\" (UniqueName: \"kubernetes.io/projected/a0962425-fb2a-4acc-966b-544669cd2dc6-kube-api-access-d8jzv\") pod \"nova-operator-controller-manager-ddcbfd695-mrhz5\" (UID: \"a0962425-fb2a-4acc-966b-544669cd2dc6\") " pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.602148 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.609930 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzsrh\" (UniqueName: \"kubernetes.io/projected/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-kube-api-access-pzsrh\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd\" (UID: \"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.610041 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qx867\" (UniqueName: \"kubernetes.io/projected/56088743-c7ed-4882-b2e5-0845caba050e-kube-api-access-qx867\") pod \"ovn-operator-controller-manager-788c46999f-hq5gx\" (UID: \"56088743-c7ed-4882-b2e5-0845caba050e\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.610193 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd\" (UID: \"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.622533 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.624898 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-pwhzf" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.640651 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8jzv\" (UniqueName: \"kubernetes.io/projected/a0962425-fb2a-4acc-966b-544669cd2dc6-kube-api-access-d8jzv\") pod \"nova-operator-controller-manager-ddcbfd695-mrhz5\" (UID: \"a0962425-fb2a-4acc-966b-544669cd2dc6\") " pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.645972 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.649798 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7nsw\" (UniqueName: \"kubernetes.io/projected/fbe04280-2c8a-4f54-9442-26fb1f381358-kube-api-access-v7nsw\") pod \"octavia-operator-controller-manager-5c765b4558-v7nnq\" (UID: \"fbe04280-2c8a-4f54-9442-26fb1f381358\") " pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.661365 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.674089 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.679804 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.680413 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.681582 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.684587 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-p2qlg" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.685265 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-69dq7" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.698060 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.709235 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.712241 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7d8kp\" (UniqueName: \"kubernetes.io/projected/a5d0d7c1-8591-4619-912c-8db740ebd050-kube-api-access-7d8kp\") pod \"placement-operator-controller-manager-5b964cf4cd-b52rs\" (UID: \"a5d0d7c1-8591-4619-912c-8db740ebd050\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.712327 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzsrh\" (UniqueName: \"kubernetes.io/projected/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-kube-api-access-pzsrh\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd\" (UID: \"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.712387 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qx867\" (UniqueName: \"kubernetes.io/projected/56088743-c7ed-4882-b2e5-0845caba050e-kube-api-access-qx867\") pod \"ovn-operator-controller-manager-788c46999f-hq5gx\" (UID: \"56088743-c7ed-4882-b2e5-0845caba050e\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.712451 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert\") pod \"infra-operator-controller-manager-79955696d6-gkt4m\" (UID: \"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.712505 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd\" (UID: \"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:46:47 crc kubenswrapper[4767]: E0128 18:46:47.712721 4767 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 18:46:47 crc kubenswrapper[4767]: E0128 18:46:47.712813 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert podName:ac7dbf1c-e4ce-4b04-8723-6166810cdf9b nodeName:}" failed. No retries permitted until 2026-01-28 18:46:48.212787063 +0000 UTC m=+1014.176969937 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" (UID: "ac7dbf1c-e4ce-4b04-8723-6166810cdf9b") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 28 18:46:47 crc kubenswrapper[4767]: E0128 18:46:47.714540 4767 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found
Jan 28 18:46:47 crc kubenswrapper[4767]: E0128 18:46:47.714614 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert podName:c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d nodeName:}" failed. No retries permitted until 2026-01-28 18:46:48.714590029 +0000 UTC m=+1014.678772903 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert") pod "infra-operator-controller-manager-79955696d6-gkt4m" (UID: "c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d") : secret "infra-operator-webhook-server-cert" not found
Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.738315 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c"]
Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.744232 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs"]
Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.745611 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs"
Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.746775 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq"
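[Editor's note] Both webhook-cert mounts fail only because their Secrets have not been created yet, and the two nestedpendingoperations errors for the same infra-operator cert volume show the per-volume retry backoff doubling: durationBeforeRetry 500ms on the first failure (earlier, at 18:46:47.185), then 1s here. A minimal Go sketch of that doubling backoff; the 500ms base and factor of two are read off the log, while the cap is an assumption:

    package main

    import (
        "fmt"
        "time"
    )

    // backoff tracks per-operation retry state, in the spirit of the
    // per-volume backoff above: each consecutive failure doubles the wait.
    type backoff struct {
        delay time.Duration
        next  time.Time
    }

    const (
        initialDelay = 500 * time.Millisecond // matches "durationBeforeRetry 500ms"
        factor       = 2                      // 500ms -> 1s, as seen for the infra cert volume
        maxDelay     = 2 * time.Minute        // assumed cap, not visible in this log
    )

    func (b *backoff) fail(now time.Time) {
        if b.delay == 0 {
            b.delay = initialDelay
        } else {
            b.delay *= factor
            if b.delay > maxDelay {
                b.delay = maxDelay
            }
        }
        b.next = now.Add(b.delay)
        fmt.Printf("No retries permitted until %s (durationBeforeRetry %v)\n",
            b.next.UTC().Format("2006-01-02 15:04:05.000000000"), b.delay)
    }

    func main() {
        var b backoff
        first := time.Date(2026, 1, 28, 18, 46, 47, 185549000, time.UTC)
        b.fail(first)                             // -> 500ms, retry at 18:46:47.685...
        b.fail(first.Add(529 * time.Millisecond)) // -> 1s, retry at 18:46:48.714...
    }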
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.749994 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-6nmj4" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.753816 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.755614 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzsrh\" (UniqueName: \"kubernetes.io/projected/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-kube-api-access-pzsrh\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd\" (UID: \"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.764248 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qx867\" (UniqueName: \"kubernetes.io/projected/56088743-c7ed-4882-b2e5-0845caba050e-kube-api-access-qx867\") pod \"ovn-operator-controller-manager-788c46999f-hq5gx\" (UID: \"56088743-c7ed-4882-b2e5-0845caba050e\") " pod="openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.771921 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.773638 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.778467 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-jgz4w" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.781509 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.813667 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvs9p\" (UniqueName: \"kubernetes.io/projected/0d7c50d3-1348-43e5-a8fa-f05cd53d2a42-kube-api-access-nvs9p\") pod \"telemetry-operator-controller-manager-79f6f75b9c-dhf5c\" (UID: \"0d7c50d3-1348-43e5-a8fa-f05cd53d2a42\") " pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.813727 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7d8kp\" (UniqueName: \"kubernetes.io/projected/a5d0d7c1-8591-4619-912c-8db740ebd050-kube-api-access-7d8kp\") pod \"placement-operator-controller-manager-5b964cf4cd-b52rs\" (UID: \"a5d0d7c1-8591-4619-912c-8db740ebd050\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.813791 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zkmpl\" (UniqueName: \"kubernetes.io/projected/b60452a4-90a3-492d-be1d-f481ed0fce75-kube-api-access-zkmpl\") pod \"test-operator-controller-manager-56f8bfcd9f-tkgjs\" (UID: 
\"b60452a4-90a3-492d-be1d-f481ed0fce75\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.813836 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6mst6\" (UniqueName: \"kubernetes.io/projected/77bf9c1d-826f-418d-8a94-80e4d46cc051-kube-api-access-6mst6\") pod \"swift-operator-controller-manager-68fc8c869-w4dzg\" (UID: \"77bf9c1d-826f-418d-8a94-80e4d46cc051\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg" Jan 28 18:46:47 crc kubenswrapper[4767]: W0128 18:46:47.814263 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podccee4b83_1a09_4828_b370_5fb768476acc.slice/crio-cda9e7d0e2461214838c6fee7377c43f12e49d2b38a7202df96df3dc745b457c WatchSource:0}: Error finding container cda9e7d0e2461214838c6fee7377c43f12e49d2b38a7202df96df3dc745b457c: Status 404 returned error can't find the container with id cda9e7d0e2461214838c6fee7377c43f12e49d2b38a7202df96df3dc745b457c Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.820080 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.846914 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7d8kp\" (UniqueName: \"kubernetes.io/projected/a5d0d7c1-8591-4619-912c-8db740ebd050-kube-api-access-7d8kp\") pod \"placement-operator-controller-manager-5b964cf4cd-b52rs\" (UID: \"a5d0d7c1-8591-4619-912c-8db740ebd050\") " pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.854732 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.855718 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.858495 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.859070 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-ckfl4" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.859253 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.870014 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.876837 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.878224 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.882590 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-2rmdd" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.887518 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2"] Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.915254 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6mst6\" (UniqueName: \"kubernetes.io/projected/77bf9c1d-826f-418d-8a94-80e4d46cc051-kube-api-access-6mst6\") pod \"swift-operator-controller-manager-68fc8c869-w4dzg\" (UID: \"77bf9c1d-826f-418d-8a94-80e4d46cc051\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.915314 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.915357 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvs9p\" (UniqueName: \"kubernetes.io/projected/0d7c50d3-1348-43e5-a8fa-f05cd53d2a42-kube-api-access-nvs9p\") pod \"telemetry-operator-controller-manager-79f6f75b9c-dhf5c\" (UID: \"0d7c50d3-1348-43e5-a8fa-f05cd53d2a42\") " pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.915389 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.915425 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fs94\" (UniqueName: \"kubernetes.io/projected/17bdefa0-fed1-4dd5-abde-10d52eebbdb8-kube-api-access-6fs94\") pod \"watcher-operator-controller-manager-767b8bc766-wvg8s\" (UID: \"17bdefa0-fed1-4dd5-abde-10d52eebbdb8\") " pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.915441 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zkmpl\" (UniqueName: \"kubernetes.io/projected/b60452a4-90a3-492d-be1d-f481ed0fce75-kube-api-access-zkmpl\") pod \"test-operator-controller-manager-56f8bfcd9f-tkgjs\" (UID: \"b60452a4-90a3-492d-be1d-f481ed0fce75\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.915458 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qdflp\" (UniqueName: 
\"kubernetes.io/projected/964b21b6-27a7-4fa9-9f44-ddb1484e7266-kube-api-access-qdflp\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.943412 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6mst6\" (UniqueName: \"kubernetes.io/projected/77bf9c1d-826f-418d-8a94-80e4d46cc051-kube-api-access-6mst6\") pod \"swift-operator-controller-manager-68fc8c869-w4dzg\" (UID: \"77bf9c1d-826f-418d-8a94-80e4d46cc051\") " pod="openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.956623 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvs9p\" (UniqueName: \"kubernetes.io/projected/0d7c50d3-1348-43e5-a8fa-f05cd53d2a42-kube-api-access-nvs9p\") pod \"telemetry-operator-controller-manager-79f6f75b9c-dhf5c\" (UID: \"0d7c50d3-1348-43e5-a8fa-f05cd53d2a42\") " pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.956814 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zkmpl\" (UniqueName: \"kubernetes.io/projected/b60452a4-90a3-492d-be1d-f481ed0fce75-kube-api-access-zkmpl\") pod \"test-operator-controller-manager-56f8bfcd9f-tkgjs\" (UID: \"b60452a4-90a3-492d-be1d-f481ed0fce75\") " pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.965335 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.967306 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs" Jan 28 18:46:47 crc kubenswrapper[4767]: I0128 18:46:47.990572 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg"] Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.004619 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp"] Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.015778 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.021846 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.021943 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvmxx\" (UniqueName: \"kubernetes.io/projected/83eb3c39-bbf1-4059-ae27-c3a8aac5ad69-kube-api-access-cvmxx\") pod \"rabbitmq-cluster-operator-manager-668c99d594-xjfz2\" (UID: \"83eb3c39-bbf1-4059-ae27-c3a8aac5ad69\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.022108 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fs94\" (UniqueName: \"kubernetes.io/projected/17bdefa0-fed1-4dd5-abde-10d52eebbdb8-kube-api-access-6fs94\") pod \"watcher-operator-controller-manager-767b8bc766-wvg8s\" (UID: \"17bdefa0-fed1-4dd5-abde-10d52eebbdb8\") " pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.022184 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qdflp\" (UniqueName: \"kubernetes.io/projected/964b21b6-27a7-4fa9-9f44-ddb1484e7266-kube-api-access-qdflp\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.022403 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.022738 4767 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.022839 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs podName:964b21b6-27a7-4fa9-9f44-ddb1484e7266 nodeName:}" failed. No retries permitted until 2026-01-28 18:46:48.522812182 +0000 UTC m=+1014.486995056 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs") pod "openstack-operator-controller-manager-77dc76679b-k6848" (UID: "964b21b6-27a7-4fa9-9f44-ddb1484e7266") : secret "metrics-server-cert" not found Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.023602 4767 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.023651 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs podName:964b21b6-27a7-4fa9-9f44-ddb1484e7266 nodeName:}" failed. No retries permitted until 2026-01-28 18:46:48.523640889 +0000 UTC m=+1014.487823763 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs") pod "openstack-operator-controller-manager-77dc76679b-k6848" (UID: "964b21b6-27a7-4fa9-9f44-ddb1484e7266") : secret "webhook-server-cert" not found Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.036995 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.058553 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fs94\" (UniqueName: \"kubernetes.io/projected/17bdefa0-fed1-4dd5-abde-10d52eebbdb8-kube-api-access-6fs94\") pod \"watcher-operator-controller-manager-767b8bc766-wvg8s\" (UID: \"17bdefa0-fed1-4dd5-abde-10d52eebbdb8\") " pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.068222 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qdflp\" (UniqueName: \"kubernetes.io/projected/964b21b6-27a7-4fa9-9f44-ddb1484e7266-kube-api-access-qdflp\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.126644 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvmxx\" (UniqueName: \"kubernetes.io/projected/83eb3c39-bbf1-4059-ae27-c3a8aac5ad69-kube-api-access-cvmxx\") pod \"rabbitmq-cluster-operator-manager-668c99d594-xjfz2\" (UID: \"83eb3c39-bbf1-4059-ae27-c3a8aac5ad69\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.141425 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq"] Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.148534 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvmxx\" (UniqueName: \"kubernetes.io/projected/83eb3c39-bbf1-4059-ae27-c3a8aac5ad69-kube-api-access-cvmxx\") pod \"rabbitmq-cluster-operator-manager-668c99d594-xjfz2\" (UID: \"83eb3c39-bbf1-4059-ae27-c3a8aac5ad69\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.151552 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.175715 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp"] Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.187554 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp"] Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.192992 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7"] Jan 28 18:46:48 crc kubenswrapper[4767]: W0128 18:46:48.231632 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4129a635_943e_4417_8934_24c408083149.slice/crio-010f7b4b8e946bc8564ee919d58e0a8a804a47bb51c45c644e042fbc7a4a4e13 WatchSource:0}: Error finding container 010f7b4b8e946bc8564ee919d58e0a8a804a47bb51c45c644e042fbc7a4a4e13: Status 404 returned error can't find the container with id 010f7b4b8e946bc8564ee919d58e0a8a804a47bb51c45c644e042fbc7a4a4e13 Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.235464 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd\" (UID: \"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.235987 4767 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.236075 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert podName:ac7dbf1c-e4ce-4b04-8723-6166810cdf9b nodeName:}" failed. No retries permitted until 2026-01-28 18:46:49.236046544 +0000 UTC m=+1015.200229418 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" (UID: "ac7dbf1c-e4ce-4b04-8723-6166810cdf9b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.289534 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-958664b5-4prpg"] Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.311085 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.335032 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-6978b79747-l4575"] Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.546332 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.546548 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.546706 4767 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.546771 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs podName:964b21b6-27a7-4fa9-9f44-ddb1484e7266 nodeName:}" failed. No retries permitted until 2026-01-28 18:46:49.546752364 +0000 UTC m=+1015.510935238 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs") pod "openstack-operator-controller-manager-77dc76679b-k6848" (UID: "964b21b6-27a7-4fa9-9f44-ddb1484e7266") : secret "metrics-server-cert" not found Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.547136 4767 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.547612 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs podName:964b21b6-27a7-4fa9-9f44-ddb1484e7266 nodeName:}" failed. No retries permitted until 2026-01-28 18:46:49.54755657 +0000 UTC m=+1015.511739444 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs") pod "openstack-operator-controller-manager-77dc76679b-k6848" (UID: "964b21b6-27a7-4fa9-9f44-ddb1484e7266") : secret "webhook-server-cert" not found Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.555543 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj"] Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.560959 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-765668569f-98mkp"] Jan 28 18:46:48 crc kubenswrapper[4767]: W0128 18:46:48.580449 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb48deaef_0712_425b_8d49_133c4931ea06.slice/crio-8e8d8d124a76258778d8152c42ebe98fb77390182e060336c777d62f3c9bdc67 WatchSource:0}: Error finding container 8e8d8d124a76258778d8152c42ebe98fb77390182e060336c777d62f3c9bdc67: Status 404 returned error can't find the container with id 8e8d8d124a76258778d8152c42ebe98fb77390182e060336c777d62f3c9bdc67 Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.705068 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9"] Jan 28 18:46:48 crc kubenswrapper[4767]: W0128 18:46:48.708379 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod14052357_c381_4e6b_ad51_13179cd09877.slice/crio-5a6ebf753bd6496db9524dd130b4a01d3ee3f9d82cf27179f9ec168a95f15a28 WatchSource:0}: Error finding container 5a6ebf753bd6496db9524dd130b4a01d3ee3f9d82cf27179f9ec168a95f15a28: Status 404 returned error can't find the container with id 5a6ebf753bd6496db9524dd130b4a01d3ee3f9d82cf27179f9ec168a95f15a28 Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.732322 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq"] Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.750558 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert\") pod \"infra-operator-controller-manager-79955696d6-gkt4m\" (UID: \"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.750558 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-6978b79747-l4575" event={"ID":"898fb09e-f084-45e3-88bd-7a67ef198bee","Type":"ContainerStarted","Data":"705555253ac59d6d2d45c31eea1bac2685ba0e10b93d23b624b442327108e237"} Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.750714 4767 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.750823 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert podName:c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d nodeName:}" failed. No retries permitted until 2026-01-28 18:46:50.750801078 +0000 UTC m=+1016.714983952 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert") pod "infra-operator-controller-manager-79955696d6-gkt4m" (UID: "c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d") : secret "infra-operator-webhook-server-cert" not found Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.759415 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9" event={"ID":"14052357-c381-4e6b-ad51-13179cd09877","Type":"ContainerStarted","Data":"5a6ebf753bd6496db9524dd130b4a01d3ee3f9d82cf27179f9ec168a95f15a28"} Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.762019 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg" event={"ID":"3355ea8c-1093-449e-9a8d-a4598f46242c","Type":"ContainerStarted","Data":"109d297e9167ebd980cb8aab89cbd4220701b5610cdcb73f40bb445de52e571d"} Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.765369 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp" event={"ID":"e2672b52-b1cf-491e-8f76-46c22b19fbbf","Type":"ContainerStarted","Data":"49927c5a0ab3bdc41df92c1ae6cdf80d0307b53130afdf5de8f718dcfd64e959"} Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.766542 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-958664b5-4prpg" event={"ID":"e3793181-a73c-494b-ba12-a1a908a7d6f5","Type":"ContainerStarted","Data":"a17b92b0a9711672663f82601b647f30518802a5f9515cc9c1025b68fb10810b"} Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.767633 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq" event={"ID":"4e17b611-fe05-4f69-b64e-b1abb213b297","Type":"ContainerStarted","Data":"ecf76ed3e9a2bfb939ac1f641996af35848dfb4b059ac2cc6f8c529ce72bbd78"} Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.769379 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp" event={"ID":"ccee4b83-1a09-4828-b370-5fb768476acc","Type":"ContainerStarted","Data":"cda9e7d0e2461214838c6fee7377c43f12e49d2b38a7202df96df3dc745b457c"} Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.771181 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj" event={"ID":"58b8adeb-7f68-48a4-a8b3-5e93b6b93ec8","Type":"ContainerStarted","Data":"d271af2276c305ffbdf2d50ee507a6a84ab5e685ef1c40a15fb444cff1c4c947"} Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.773223 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-765668569f-98mkp" event={"ID":"b48deaef-0712-425b-8d49-133c4931ea06","Type":"ContainerStarted","Data":"8e8d8d124a76258778d8152c42ebe98fb77390182e060336c777d62f3c9bdc67"} Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.775037 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7" event={"ID":"4129a635-943e-4417-8934-24c408083149","Type":"ContainerStarted","Data":"010f7b4b8e946bc8564ee919d58e0a8a804a47bb51c45c644e042fbc7a4a4e13"} Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.780834 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp" event={"ID":"3bbfc3c2-f654-4728-8bc0-da11b96d4246","Type":"ContainerStarted","Data":"2fc80e7c4cf812dd45a00f87da13f9704493f0fec7b5ddea1415c156fd81e095"} Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.912109 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs"] Jan 28 18:46:48 crc kubenswrapper[4767]: W0128 18:46:48.931595 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56088743_c7ed_4882_b2e5_0845caba050e.slice/crio-53cf23379d74b53ac8850962e03edce21fdb53e48dfc50adf6fe13753d9744f7 WatchSource:0}: Error finding container 53cf23379d74b53ac8850962e03edce21fdb53e48dfc50adf6fe13753d9744f7: Status 404 returned error can't find the container with id 53cf23379d74b53ac8850962e03edce21fdb53e48dfc50adf6fe13753d9744f7 Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.934022 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5"] Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.942368 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs"] Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 18:46:48.955430 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx"] Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.958915 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-zkmpl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-56f8bfcd9f-tkgjs_openstack-operators(b60452a4-90a3-492d-be1d-f481ed0fce75): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.959331 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:38.102.83.248:5001/openstack-k8s-operators/telemetry-operator:774b657c4a2d169eb939c51d71a146bf4a44e93b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-nvs9p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod telemetry-operator-controller-manager-79f6f75b9c-dhf5c_openstack-operators(0d7c50d3-1348-43e5-a8fa-f05cd53d2a42): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 28 18:46:48 crc kubenswrapper[4767]: I0128 
18:46:48.960043 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c"] Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.960168 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs" podUID="b60452a4-90a3-492d-be1d-f481ed0fce75" Jan 28 18:46:48 crc kubenswrapper[4767]: E0128 18:46:48.961725 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" podUID="0d7c50d3-1348-43e5-a8fa-f05cd53d2a42" Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.173309 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg"] Jan 28 18:46:49 crc kubenswrapper[4767]: W0128 18:46:49.183321 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77bf9c1d_826f_418d_8a94_80e4d46cc051.slice/crio-de9a7ba9486badfc6fde31fe3542b4fcf90162939607e8bd35c370b78fd97d29 WatchSource:0}: Error finding container de9a7ba9486badfc6fde31fe3542b4fcf90162939607e8bd35c370b78fd97d29: Status 404 returned error can't find the container with id de9a7ba9486badfc6fde31fe3542b4fcf90162939607e8bd35c370b78fd97d29 Jan 28 18:46:49 crc kubenswrapper[4767]: E0128 18:46:49.189497 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/watcher-operator@sha256:35f1eb96f42069bb8f7c33942fb86b41843ba02803464245c16192ccda3d50e4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6fs94,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-767b8bc766-wvg8s_openstack-operators(17bdefa0-fed1-4dd5-abde-10d52eebbdb8): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 28 18:46:49 crc kubenswrapper[4767]: E0128 18:46:49.190681 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" podUID="17bdefa0-fed1-4dd5-abde-10d52eebbdb8" Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.191742 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2"] Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.199738 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s"] Jan 28 18:46:49 crc kubenswrapper[4767]: W0128 18:46:49.207308 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod83eb3c39_bbf1_4059_ae27_c3a8aac5ad69.slice/crio-42599a237586c87b93b86fd2a752cea8ad2536a5f17e26d5626993b05fbf27ad WatchSource:0}: Error finding container 42599a237586c87b93b86fd2a752cea8ad2536a5f17e26d5626993b05fbf27ad: Status 404 returned error can't find the container with id 42599a237586c87b93b86fd2a752cea8ad2536a5f17e26d5626993b05fbf27ad Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.263896 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd\" (UID: \"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:46:49 crc kubenswrapper[4767]: E0128 18:46:49.264259 4767 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 18:46:49 crc kubenswrapper[4767]: E0128 18:46:49.264344 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert podName:ac7dbf1c-e4ce-4b04-8723-6166810cdf9b nodeName:}" failed. No retries permitted until 2026-01-28 18:46:51.264322904 +0000 UTC m=+1017.228505778 (durationBeforeRetry 2s). 
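A second, independent failure mode shows up above: `ErrImagePull: "pull QPS exceeded"`. The kubelet throttles registry pulls with a token bucket (the --registry-qps / --registry-burst settings, commonly 5 and 10), and the check is non-blocking, so when a few dozen operator images are requested at once, the pulls beyond the available tokens fail immediately and fall into back-off instead of queueing. An illustrative token bucket under those assumed defaults, not the kubelet's own code:

```go
// Illustrative token-bucket limiter showing why a burst of ~30 simultaneous
// operator-image pulls exceeds an assumed 5 QPS / burst-10 budget and some
// pulls are rejected outright ("pull QPS exceeded") rather than delayed.
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	limiter := rate.NewLimiter(rate.Limit(5), 10) // 5 pulls/s, burst of 10

	granted, rejected := 0, 0
	for i := 0; i < 30; i++ { // 30 near-simultaneous image pulls
		if limiter.Allow() { // non-blocking, like the kubelet's check
			granted++
		} else {
			rejected++ // surfaces as ErrImagePull: pull QPS exceeded
		}
	}
	fmt.Printf("granted=%d rejected=%d\n", granted, rejected)
}
```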
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" (UID: "ac7dbf1c-e4ce-4b04-8723-6166810cdf9b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.569648 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.569786 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:49 crc kubenswrapper[4767]: E0128 18:46:49.569882 4767 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 18:46:49 crc kubenswrapper[4767]: E0128 18:46:49.569986 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs podName:964b21b6-27a7-4fa9-9f44-ddb1484e7266 nodeName:}" failed. No retries permitted until 2026-01-28 18:46:51.569963097 +0000 UTC m=+1017.534145971 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs") pod "openstack-operator-controller-manager-77dc76679b-k6848" (UID: "964b21b6-27a7-4fa9-9f44-ddb1484e7266") : secret "webhook-server-cert" not found Jan 28 18:46:49 crc kubenswrapper[4767]: E0128 18:46:49.570019 4767 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 18:46:49 crc kubenswrapper[4767]: E0128 18:46:49.570101 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs podName:964b21b6-27a7-4fa9-9f44-ddb1484e7266 nodeName:}" failed. No retries permitted until 2026-01-28 18:46:51.5700787 +0000 UTC m=+1017.534261744 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs") pod "openstack-operator-controller-manager-77dc76679b-k6848" (UID: "964b21b6-27a7-4fa9-9f44-ddb1484e7266") : secret "metrics-server-cert" not found Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.792782 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs" event={"ID":"b60452a4-90a3-492d-be1d-f481ed0fce75","Type":"ContainerStarted","Data":"f3452a1f217126b3c4bc8860e6134051a0f2e1815f38a1143ad6dfc5259482c4"} Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.795089 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5" event={"ID":"a0962425-fb2a-4acc-966b-544669cd2dc6","Type":"ContainerStarted","Data":"06b18de97eaeb4366cce8f86eb8e4ed64b199192dd72b6a5b7f484f8c149ac2c"} Jan 28 18:46:49 crc kubenswrapper[4767]: E0128 18:46:49.795130 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241\\\"\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs" podUID="b60452a4-90a3-492d-be1d-f481ed0fce75" Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.797170 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg" event={"ID":"77bf9c1d-826f-418d-8a94-80e4d46cc051","Type":"ContainerStarted","Data":"de9a7ba9486badfc6fde31fe3542b4fcf90162939607e8bd35c370b78fd97d29"} Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.799919 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq" event={"ID":"fbe04280-2c8a-4f54-9442-26fb1f381358","Type":"ContainerStarted","Data":"0178cd18138aa928162a8a136ba65c343829f3ca366d4f6e33d2d8a8a7dbe660"} Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.801866 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx" event={"ID":"56088743-c7ed-4882-b2e5-0845caba050e","Type":"ContainerStarted","Data":"53cf23379d74b53ac8850962e03edce21fdb53e48dfc50adf6fe13753d9744f7"} Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.803055 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs" event={"ID":"a5d0d7c1-8591-4619-912c-8db740ebd050","Type":"ContainerStarted","Data":"9b453afbc7302dc192825a5bac007b4caf2d95ea2958022b15e281f340023ea6"} Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.804942 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" event={"ID":"0d7c50d3-1348-43e5-a8fa-f05cd53d2a42","Type":"ContainerStarted","Data":"e343ce9c038aa2901e71c017aa4e38411aab26744176197ce79651da5b0b52df"} Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.809837 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" event={"ID":"17bdefa0-fed1-4dd5-abde-10d52eebbdb8","Type":"ContainerStarted","Data":"64d5431054a00de300704ce10bb9b2d54372b4488a09d1ff3b9353aa05d36e92"} Jan 28 
18:46:49 crc kubenswrapper[4767]: E0128 18:46:49.815599 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.248:5001/openstack-k8s-operators/telemetry-operator:774b657c4a2d169eb939c51d71a146bf4a44e93b\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" podUID="0d7c50d3-1348-43e5-a8fa-f05cd53d2a42" Jan 28 18:46:49 crc kubenswrapper[4767]: E0128 18:46:49.815693 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/watcher-operator@sha256:35f1eb96f42069bb8f7c33942fb86b41843ba02803464245c16192ccda3d50e4\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" podUID="17bdefa0-fed1-4dd5-abde-10d52eebbdb8" Jan 28 18:46:49 crc kubenswrapper[4767]: I0128 18:46:49.817097 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2" event={"ID":"83eb3c39-bbf1-4059-ae27-c3a8aac5ad69","Type":"ContainerStarted","Data":"42599a237586c87b93b86fd2a752cea8ad2536a5f17e26d5626993b05fbf27ad"} Jan 28 18:46:50 crc kubenswrapper[4767]: I0128 18:46:50.798838 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert\") pod \"infra-operator-controller-manager-79955696d6-gkt4m\" (UID: \"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:46:50 crc kubenswrapper[4767]: E0128 18:46:50.799244 4767 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 18:46:50 crc kubenswrapper[4767]: E0128 18:46:50.799286 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert podName:c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d nodeName:}" failed. No retries permitted until 2026-01-28 18:46:54.799272756 +0000 UTC m=+1020.763455630 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert") pod "infra-operator-controller-manager-79955696d6-gkt4m" (UID: "c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d") : secret "infra-operator-webhook-server-cert" not found Jan 28 18:46:50 crc kubenswrapper[4767]: E0128 18:46:50.828749 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/watcher-operator@sha256:35f1eb96f42069bb8f7c33942fb86b41843ba02803464245c16192ccda3d50e4\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" podUID="17bdefa0-fed1-4dd5-abde-10d52eebbdb8" Jan 28 18:46:50 crc kubenswrapper[4767]: E0128 18:46:50.828749 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:3e01e99d3ca1b6c20b1bb015b00cfcbffc584f22a93dc6fe4019d63b813c0241\\\"\"" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs" podUID="b60452a4-90a3-492d-be1d-f481ed0fce75" Jan 28 18:46:50 crc kubenswrapper[4767]: E0128 18:46:50.829015 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"38.102.83.248:5001/openstack-k8s-operators/telemetry-operator:774b657c4a2d169eb939c51d71a146bf4a44e93b\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" podUID="0d7c50d3-1348-43e5-a8fa-f05cd53d2a42" Jan 28 18:46:51 crc kubenswrapper[4767]: I0128 18:46:51.308413 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd\" (UID: \"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:46:51 crc kubenswrapper[4767]: E0128 18:46:51.308667 4767 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 18:46:51 crc kubenswrapper[4767]: E0128 18:46:51.308767 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert podName:ac7dbf1c-e4ce-4b04-8723-6166810cdf9b nodeName:}" failed. No retries permitted until 2026-01-28 18:46:55.308743815 +0000 UTC m=+1021.272926689 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert") pod "openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" (UID: "ac7dbf1c-e4ce-4b04-8723-6166810cdf9b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 18:46:51 crc kubenswrapper[4767]: I0128 18:46:51.615827 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:51 crc kubenswrapper[4767]: I0128 18:46:51.616344 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:46:51 crc kubenswrapper[4767]: E0128 18:46:51.616106 4767 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 28 18:46:51 crc kubenswrapper[4767]: E0128 18:46:51.616520 4767 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 28 18:46:51 crc kubenswrapper[4767]: E0128 18:46:51.616543 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs podName:964b21b6-27a7-4fa9-9f44-ddb1484e7266 nodeName:}" failed. No retries permitted until 2026-01-28 18:46:55.616511014 +0000 UTC m=+1021.580693888 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs") pod "openstack-operator-controller-manager-77dc76679b-k6848" (UID: "964b21b6-27a7-4fa9-9f44-ddb1484e7266") : secret "webhook-server-cert" not found Jan 28 18:46:51 crc kubenswrapper[4767]: E0128 18:46:51.617170 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs podName:964b21b6-27a7-4fa9-9f44-ddb1484e7266 nodeName:}" failed. No retries permitted until 2026-01-28 18:46:55.617145733 +0000 UTC m=+1021.581328797 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs") pod "openstack-operator-controller-manager-77dc76679b-k6848" (UID: "964b21b6-27a7-4fa9-9f44-ddb1484e7266") : secret "metrics-server-cert" not found Jan 28 18:46:54 crc kubenswrapper[4767]: I0128 18:46:54.867784 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert\") pod \"infra-operator-controller-manager-79955696d6-gkt4m\" (UID: \"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:46:54 crc kubenswrapper[4767]: E0128 18:46:54.867950 4767 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 18:46:54 crc kubenswrapper[4767]: E0128 18:46:54.868194 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert podName:c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d nodeName:}" failed. No retries permitted until 2026-01-28 18:47:02.868178072 +0000 UTC m=+1028.832360946 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert") pod "infra-operator-controller-manager-79955696d6-gkt4m" (UID: "c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d") : secret "infra-operator-webhook-server-cert" not found Jan 28 18:46:55 crc kubenswrapper[4767]: I0128 18:46:55.377320 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd\" (UID: \"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:46:55 crc kubenswrapper[4767]: E0128 18:46:55.377567 4767 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 28 18:46:55 crc kubenswrapper[4767]: E0128 18:46:55.377637 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert podName:ac7dbf1c-e4ce-4b04-8723-6166810cdf9b nodeName:}" failed. No retries permitted until 2026-01-28 18:47:03.37761767 +0000 UTC m=+1029.341800544 (durationBeforeRetry 8s). 
Jan 28 18:46:55 crc kubenswrapper[4767]: I0128 18:46:55.682102 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848"
Jan 28 18:46:55 crc kubenswrapper[4767]: I0128 18:46:55.682223 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848"
Jan 28 18:46:55 crc kubenswrapper[4767]: E0128 18:46:55.682272 4767 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found
Jan 28 18:46:55 crc kubenswrapper[4767]: E0128 18:46:55.682308 4767 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found
Jan 28 18:46:55 crc kubenswrapper[4767]: E0128 18:46:55.682352 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs podName:964b21b6-27a7-4fa9-9f44-ddb1484e7266 nodeName:}" failed. No retries permitted until 2026-01-28 18:47:03.682336034 +0000 UTC m=+1029.646518908 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs") pod "openstack-operator-controller-manager-77dc76679b-k6848" (UID: "964b21b6-27a7-4fa9-9f44-ddb1484e7266") : secret "metrics-server-cert" not found
Jan 28 18:46:55 crc kubenswrapper[4767]: E0128 18:46:55.682364 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs podName:964b21b6-27a7-4fa9-9f44-ddb1484e7266 nodeName:}" failed. No retries permitted until 2026-01-28 18:47:03.682359344 +0000 UTC m=+1029.646542218 (durationBeforeRetry 8s).
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs") pod "openstack-operator-controller-manager-77dc76679b-k6848" (UID: "964b21b6-27a7-4fa9-9f44-ddb1484e7266") : secret "webhook-server-cert" not found Jan 28 18:47:01 crc kubenswrapper[4767]: E0128 18:47:01.216138 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/designate-operator@sha256:29a3092217e72f1ec8a163ed3d15a0a5ccc5b3117e64c72bf5e68597cc233b3d" Jan 28 18:47:01 crc kubenswrapper[4767]: E0128 18:47:01.217313 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/designate-operator@sha256:29a3092217e72f1ec8a163ed3d15a0a5ccc5b3117e64c72bf5e68597cc233b3d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-hj25z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-66dfbd6f5d-x8rlq_openstack-operators(4e17b611-fe05-4f69-b64e-b1abb213b297): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:47:01 crc kubenswrapper[4767]: E0128 18:47:01.218501 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq" 
podUID="4e17b611-fe05-4f69-b64e-b1abb213b297" Jan 28 18:47:01 crc kubenswrapper[4767]: E0128 18:47:01.924347 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/designate-operator@sha256:29a3092217e72f1ec8a163ed3d15a0a5ccc5b3117e64c72bf5e68597cc233b3d\\\"\"" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq" podUID="4e17b611-fe05-4f69-b64e-b1abb213b297" Jan 28 18:47:01 crc kubenswrapper[4767]: E0128 18:47:01.950589 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/glance-operator@sha256:8a7e2637765333c555b0b932c2bfc789235aea2c7276961657a03ef1352a7264" Jan 28 18:47:01 crc kubenswrapper[4767]: E0128 18:47:01.950793 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/glance-operator@sha256:8a7e2637765333c555b0b932c2bfc789235aea2c7276961657a03ef1352a7264,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-vd8tb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-6db5dbd896-kb8qp_openstack-operators(e2672b52-b1cf-491e-8f76-46c22b19fbbf): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:47:01 crc kubenswrapper[4767]: E0128 18:47:01.951871 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp" podUID="e2672b52-b1cf-491e-8f76-46c22b19fbbf" Jan 28 18:47:02 crc kubenswrapper[4767]: E0128 18:47:02.581974 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/cinder-operator@sha256:6da7ec7bf701fe1dd489852a16429f163a69073fae67b872dca4b080cc3514ad" Jan 28 18:47:02 crc kubenswrapper[4767]: E0128 18:47:02.582309 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/cinder-operator@sha256:6da7ec7bf701fe1dd489852a16429f163a69073fae67b872dca4b080cc3514ad,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-97w4d,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-f6487bd57-9hvzg_openstack-operators(3355ea8c-1093-449e-9a8d-a4598f46242c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:47:02 crc kubenswrapper[4767]: E0128 18:47:02.583589 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg" podUID="3355ea8c-1093-449e-9a8d-a4598f46242c" Jan 28 18:47:02 crc kubenswrapper[4767]: I0128 18:47:02.919383 
4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert\") pod \"infra-operator-controller-manager-79955696d6-gkt4m\" (UID: \"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:47:02 crc kubenswrapper[4767]: E0128 18:47:02.919716 4767 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 28 18:47:02 crc kubenswrapper[4767]: E0128 18:47:02.919978 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert podName:c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d nodeName:}" failed. No retries permitted until 2026-01-28 18:47:18.919941765 +0000 UTC m=+1044.884124639 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert") pod "infra-operator-controller-manager-79955696d6-gkt4m" (UID: "c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d") : secret "infra-operator-webhook-server-cert" not found Jan 28 18:47:02 crc kubenswrapper[4767]: E0128 18:47:02.935616 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/cinder-operator@sha256:6da7ec7bf701fe1dd489852a16429f163a69073fae67b872dca4b080cc3514ad\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg" podUID="3355ea8c-1093-449e-9a8d-a4598f46242c" Jan 28 18:47:02 crc kubenswrapper[4767]: E0128 18:47:02.935710 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/glance-operator@sha256:8a7e2637765333c555b0b932c2bfc789235aea2c7276961657a03ef1352a7264\\\"\"" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp" podUID="e2672b52-b1cf-491e-8f76-46c22b19fbbf" Jan 28 18:47:03 crc kubenswrapper[4767]: I0128 18:47:03.451017 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd\" (UID: \"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:47:03 crc kubenswrapper[4767]: I0128 18:47:03.469978 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ac7dbf1c-e4ce-4b04-8723-6166810cdf9b-cert\") pod \"openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd\" (UID: \"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:47:03 crc kubenswrapper[4767]: I0128 18:47:03.755510 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:47:03 crc kubenswrapper[4767]: I0128 18:47:03.755635 4767 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:47:03 crc kubenswrapper[4767]: I0128 18:47:03.763194 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-metrics-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:47:03 crc kubenswrapper[4767]: I0128 18:47:03.763489 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-szshm" Jan 28 18:47:03 crc kubenswrapper[4767]: I0128 18:47:03.764400 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/964b21b6-27a7-4fa9-9f44-ddb1484e7266-webhook-certs\") pod \"openstack-operator-controller-manager-77dc76679b-k6848\" (UID: \"964b21b6-27a7-4fa9-9f44-ddb1484e7266\") " pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:47:03 crc kubenswrapper[4767]: I0128 18:47:03.771645 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:47:03 crc kubenswrapper[4767]: I0128 18:47:03.954895 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-ckfl4" Jan 28 18:47:03 crc kubenswrapper[4767]: I0128 18:47:03.962985 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:47:07 crc kubenswrapper[4767]: E0128 18:47:07.195174 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:027cd7ab61ef5071d9ad6b729c95a98e51cd254642f01dc019d44cc98a9232f8" Jan 28 18:47:07 crc kubenswrapper[4767]: E0128 18:47:07.195936 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:027cd7ab61ef5071d9ad6b729c95a98e51cd254642f01dc019d44cc98a9232f8,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-x4lgf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-5fb775575f-qcsg7_openstack-operators(4129a635-943e-4417-8934-24c408083149): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:47:07 crc kubenswrapper[4767]: E0128 18:47:07.197248 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7" podUID="4129a635-943e-4417-8934-24c408083149" Jan 28 18:47:07 crc kubenswrapper[4767]: E0128 18:47:07.974311 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:027cd7ab61ef5071d9ad6b729c95a98e51cd254642f01dc019d44cc98a9232f8\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7" podUID="4129a635-943e-4417-8934-24c408083149" Jan 28 18:47:10 crc kubenswrapper[4767]: E0128 18:47:10.936874 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/neutron-operator@sha256:22665b40ffeef62d1a612c1f9f0fa8e97ff95085fad123895d786b770f421fc0" Jan 28 18:47:10 crc kubenswrapper[4767]: E0128 18:47:10.937063 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/neutron-operator@sha256:22665b40ffeef62d1a612c1f9f0fa8e97ff95085fad123895d786b770f421fc0,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-gftgg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-694c5bfc85-vsdzj_openstack-operators(58b8adeb-7f68-48a4-a8b3-5e93b6b93ec8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:47:10 crc kubenswrapper[4767]: E0128 18:47:10.938253 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj" podUID="58b8adeb-7f68-48a4-a8b3-5e93b6b93ec8" Jan 28 18:47:10 crc kubenswrapper[4767]: E0128 18:47:10.990344 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/neutron-operator@sha256:22665b40ffeef62d1a612c1f9f0fa8e97ff95085fad123895d786b770f421fc0\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj" podUID="58b8adeb-7f68-48a4-a8b3-5e93b6b93ec8" Jan 28 18:47:11 crc kubenswrapper[4767]: E0128 18:47:11.575961 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488" Jan 28 18:47:11 crc kubenswrapper[4767]: E0128 18:47:11.576219 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7d8kp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-5b964cf4cd-b52rs_openstack-operators(a5d0d7c1-8591-4619-912c-8db740ebd050): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:47:11 crc kubenswrapper[4767]: E0128 
18:47:11.577434 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs" podUID="a5d0d7c1-8591-4619-912c-8db740ebd050" Jan 28 18:47:12 crc kubenswrapper[4767]: E0128 18:47:12.007356 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:e0824d5d461ada59715eb3048ed9394c80abba09c45503f8f90ee3b34e525488\\\"\"" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs" podUID="a5d0d7c1-8591-4619-912c-8db740ebd050" Jan 28 18:47:12 crc kubenswrapper[4767]: E0128 18:47:12.015309 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2" Jan 28 18:47:12 crc kubenswrapper[4767]: E0128 18:47:12.015513 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cvmxx,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-xjfz2_openstack-operators(83eb3c39-bbf1-4059-ae27-c3a8aac5ad69): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:47:12 crc kubenswrapper[4767]: E0128 18:47:12.017409 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with 
ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2" podUID="83eb3c39-bbf1-4059-ae27-c3a8aac5ad69" Jan 28 18:47:13 crc kubenswrapper[4767]: E0128 18:47:13.007872 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2" podUID="83eb3c39-bbf1-4059-ae27-c3a8aac5ad69" Jan 28 18:47:16 crc kubenswrapper[4767]: E0128 18:47:16.365454 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/nova-operator@sha256:a992613466db3478a00c20c28639c4a12f6326aa52c40a418d1ec40038c83b61" Jan 28 18:47:16 crc kubenswrapper[4767]: E0128 18:47:16.365962 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/nova-operator@sha256:a992613466db3478a00c20c28639c4a12f6326aa52c40a418d1ec40038c83b61,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-d8jzv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-ddcbfd695-mrhz5_openstack-operators(a0962425-fb2a-4acc-966b-544669cd2dc6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" 
logger="UnhandledError" Jan 28 18:47:16 crc kubenswrapper[4767]: E0128 18:47:16.367190 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5" podUID="a0962425-fb2a-4acc-966b-544669cd2dc6" Jan 28 18:47:17 crc kubenswrapper[4767]: E0128 18:47:17.087570 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/nova-operator@sha256:a992613466db3478a00c20c28639c4a12f6326aa52c40a418d1ec40038c83b61\\\"\"" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5" podUID="a0962425-fb2a-4acc-966b-544669cd2dc6" Jan 28 18:47:17 crc kubenswrapper[4767]: E0128 18:47:17.308827 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/lmiccini/keystone-operator@sha256:45ef0b95f941479535575b3d2cabb58a52e1d8490eed3da1bca9acd49344a722" Jan 28 18:47:17 crc kubenswrapper[4767]: E0128 18:47:17.309008 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/lmiccini/keystone-operator@sha256:45ef0b95f941479535575b3d2cabb58a52e1d8490eed3da1bca9acd49344a722,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xsrs4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
keystone-operator-controller-manager-6978b79747-l4575_openstack-operators(898fb09e-f084-45e3-88bd-7a67ef198bee): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 18:47:17 crc kubenswrapper[4767]: E0128 18:47:17.310314 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-6978b79747-l4575" podUID="898fb09e-f084-45e3-88bd-7a67ef198bee"
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.051972 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg" event={"ID":"77bf9c1d-826f-418d-8a94-80e4d46cc051","Type":"ContainerStarted","Data":"82b8347ce4a2dd4857b6e76546143faabde444a58210cfad43b7776ea5f9c676"}
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.057914 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq" event={"ID":"fbe04280-2c8a-4f54-9442-26fb1f381358","Type":"ContainerStarted","Data":"aeae9b3a5bb6be87314efafd924765ce5b53fdf2c1c0f5fb91d8211622c728e5"}
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.058637 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq"
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.069114 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9" event={"ID":"14052357-c381-4e6b-ad51-13179cd09877","Type":"ContainerStarted","Data":"937f4e72042a7dca2e730bc9e19832573a591c87ca9429abff69c24bb00b9888"}
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.069720 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9"
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.076940 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-958664b5-4prpg"
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.080022 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp" event={"ID":"3bbfc3c2-f654-4728-8bc0-da11b96d4246","Type":"ContainerStarted","Data":"a4a0f50428bb2de4706221cf6f379b8e9788bcbde5f3b53eecca4287b4b8cd4c"}
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.080099 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp"
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.099065 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq" podStartSLOduration=6.20102729 podStartE2EDuration="32.099043011s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.746795523 +0000 UTC m=+1014.710978397" lastFinishedPulling="2026-01-28 18:47:14.644811244 +0000 UTC m=+1040.608994118" observedRunningTime="2026-01-28 18:47:18.08876553 +0000 UTC m=+1044.052948414" watchObservedRunningTime="2026-01-28 18:47:18.099043011 +0000 UTC m=+1044.063225885"
Jan 28 18:47:18 crc kubenswrapper[4767]: E0128 18:47:18.115841 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/lmiccini/keystone-operator@sha256:45ef0b95f941479535575b3d2cabb58a52e1d8490eed3da1bca9acd49344a722\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-6978b79747-l4575" podUID="898fb09e-f084-45e3-88bd-7a67ef198bee"
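Note the shape of the retries throughout this log: durationBeforeRetry doubles from 4s to 8s to 16s for the failing secret mounts, and each canceled image pull moves from ErrImagePull into ImagePullBackOff. Purely as an illustration of that doubling schedule (this is not kubelet's own retry code), a sketch using apimachinery's wait.Backoff:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	// Mirrors the retry delays visible above: 4s, then 8s, then 16s.
	b := wait.Backoff{
		Duration: 4 * time.Second, // first durationBeforeRetry seen in the log
		Factor:   2.0,             // each failed attempt doubles the delay
		Steps:    4,               // number of delays to emit
	}
	for b.Steps > 0 {
		fmt.Println(b.Step()) // prints 4s, 8s, 16s, 32s
	}
}

The backoff is why the pods below recover without intervention: once the images finish pulling and the Secrets exist, the next scheduled retry succeeds and the ContainerStarted events follow.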
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.120699 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd"]
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.120925 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9" podStartSLOduration=9.900389017 podStartE2EDuration="32.120906705s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.712050876 +0000 UTC m=+1014.676233750" lastFinishedPulling="2026-01-28 18:47:10.932568564 +0000 UTC m=+1036.896751438" observedRunningTime="2026-01-28 18:47:18.115612429 +0000 UTC m=+1044.079795313" watchObservedRunningTime="2026-01-28 18:47:18.120906705 +0000 UTC m=+1044.085089579"
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.167878 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp" podStartSLOduration=5.788851147 podStartE2EDuration="32.167853525s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.264601997 +0000 UTC m=+1014.228784871" lastFinishedPulling="2026-01-28 18:47:14.643603965 +0000 UTC m=+1040.607787249" observedRunningTime="2026-01-28 18:47:18.15395818 +0000 UTC m=+1044.118141064" watchObservedRunningTime="2026-01-28 18:47:18.167853525 +0000 UTC m=+1044.132036399"
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.186884 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-958664b5-4prpg" podStartSLOduration=8.51705486 podStartE2EDuration="32.186864069s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.337451557 +0000 UTC m=+1014.301634431" lastFinishedPulling="2026-01-28 18:47:12.007260766 +0000 UTC m=+1037.971443640" observedRunningTime="2026-01-28 18:47:18.182323146 +0000 UTC m=+1044.146506020" watchObservedRunningTime="2026-01-28 18:47:18.186864069 +0000 UTC m=+1044.151046943"
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.248975 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848"]
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.935596 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert\") pod \"infra-operator-controller-manager-79955696d6-gkt4m\" (UID: \"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d\") " pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m"
Jan 28 18:47:18 crc kubenswrapper[4767]: I0128 18:47:18.946510 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d-cert\") pod \"infra-operator-controller-manager-79955696d6-gkt4m\" (UID: \"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d\") "
pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.086747 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" event={"ID":"0d7c50d3-1348-43e5-a8fa-f05cd53d2a42","Type":"ContainerStarted","Data":"ef86150552a3a4f876fe7fedf1270c97d7879f474bb7fa13e2cdf0ed1c8746d7"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.088221 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.089399 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-765668569f-98mkp" event={"ID":"b48deaef-0712-425b-8d49-133c4931ea06","Type":"ContainerStarted","Data":"318cc82c47460f7b6022576f56f008e284fb748e7b2d22ef17c0fac01e2e069a"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.089858 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-765668569f-98mkp" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.100026 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-958664b5-4prpg" event={"ID":"e3793181-a73c-494b-ba12-a1a908a7d6f5","Type":"ContainerStarted","Data":"052b422a0b4a04f95031738af9c6b2ea181293b9e37939df5972b64acd8b8fcb"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.112479 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs" event={"ID":"b60452a4-90a3-492d-be1d-f481ed0fce75","Type":"ContainerStarted","Data":"79c04a1fc49700acdb5b6331b4a8ae1fad2d03f47b1dd2aa2ac01a93ad173e6c"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.113282 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.120089 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" podStartSLOduration=2.706358193 podStartE2EDuration="32.120071394s" podCreationTimestamp="2026-01-28 18:46:47 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.959180308 +0000 UTC m=+1014.923363182" lastFinishedPulling="2026-01-28 18:47:18.372893509 +0000 UTC m=+1044.337076383" observedRunningTime="2026-01-28 18:47:19.11770608 +0000 UTC m=+1045.081888964" watchObservedRunningTime="2026-01-28 18:47:19.120071394 +0000 UTC m=+1045.084254278" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.121482 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq" event={"ID":"4e17b611-fe05-4f69-b64e-b1abb213b297","Type":"ContainerStarted","Data":"0c868b1fb05ca02107e81ee6bbb276638625fdd169a6c2d4a97cc52337a481ac"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.122225 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.135516 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp" 
event={"ID":"e2672b52-b1cf-491e-8f76-46c22b19fbbf","Type":"ContainerStarted","Data":"412bbc94fd9c2d01d246f94bf59587a54f76c542c86c32b540b41713b37e167c"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.136235 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.150886 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp" event={"ID":"ccee4b83-1a09-4828-b370-5fb768476acc","Type":"ContainerStarted","Data":"d721b9bd4fe7e02a73f9f0bc910bfc562fb2a17cc18928aa54d6d566fc8cc5ba"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.151725 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.153122 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" event={"ID":"964b21b6-27a7-4fa9-9f44-ddb1484e7266","Type":"ContainerStarted","Data":"d2302d09c521cbb660351e5bfbc36030327caf669c1454c408cc44105b9c6bfa"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.153146 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" event={"ID":"964b21b6-27a7-4fa9-9f44-ddb1484e7266","Type":"ContainerStarted","Data":"092bb2d7e5f10acb19c283b5980a676522e4c21c2049c196b7f3d4db5647472d"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.153616 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.154588 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" event={"ID":"17bdefa0-fed1-4dd5-abde-10d52eebbdb8","Type":"ContainerStarted","Data":"bfd0fb342597e001b8e2cdb424dd2ed13495e7eaab6b9a57c942446347549008"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.155039 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.155915 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx" event={"ID":"56088743-c7ed-4882-b2e5-0845caba050e","Type":"ContainerStarted","Data":"0409e10f4e81d889d1404bd9e49179005a85025c7e6d0d0eb427bfcd7483956e"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.156291 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.156954 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" event={"ID":"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b","Type":"ContainerStarted","Data":"e176370348d2edbe2ea59e64447bb80f2ef38b2c01be7b65410940e56dbe8e83"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.177774 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg" 
event={"ID":"3355ea8c-1093-449e-9a8d-a4598f46242c","Type":"ContainerStarted","Data":"a30790c1408b4772efa2ae2bc6b3b696625fdc9fb5895460c5e3e7a4f96b1ff1"} Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.178237 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.179273 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.198627 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs" podStartSLOduration=3.473754361 podStartE2EDuration="32.198599791s" podCreationTimestamp="2026-01-28 18:46:47 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.958738974 +0000 UTC m=+1014.922921848" lastFinishedPulling="2026-01-28 18:47:17.683584404 +0000 UTC m=+1043.647767278" observedRunningTime="2026-01-28 18:47:19.187545095 +0000 UTC m=+1045.151727979" watchObservedRunningTime="2026-01-28 18:47:19.198599791 +0000 UTC m=+1045.162782665" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.206438 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-hbrmh" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.215128 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.228565 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-765668569f-98mkp" podStartSLOduration=4.895752724 podStartE2EDuration="33.228538828s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.584228307 +0000 UTC m=+1014.548411181" lastFinishedPulling="2026-01-28 18:47:16.917014411 +0000 UTC m=+1042.881197285" observedRunningTime="2026-01-28 18:47:19.21326878 +0000 UTC m=+1045.177451654" watchObservedRunningTime="2026-01-28 18:47:19.228538828 +0000 UTC m=+1045.192721702" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.251929 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp" podStartSLOduration=12.789277617 podStartE2EDuration="33.251911839s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:47.818739068 +0000 UTC m=+1013.782921942" lastFinishedPulling="2026-01-28 18:47:08.28137329 +0000 UTC m=+1034.245556164" observedRunningTime="2026-01-28 18:47:19.249746581 +0000 UTC m=+1045.213929465" watchObservedRunningTime="2026-01-28 18:47:19.251911839 +0000 UTC m=+1045.216094713" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.283219 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp" podStartSLOduration=3.3399529709999998 podStartE2EDuration="33.283185988s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.12850808 +0000 UTC m=+1014.092690954" lastFinishedPulling="2026-01-28 18:47:18.071741097 +0000 UTC m=+1044.035923971" observedRunningTime="2026-01-28 18:47:19.280488163 +0000 UTC m=+1045.244671037" 
watchObservedRunningTime="2026-01-28 18:47:19.283185988 +0000 UTC m=+1045.247368862" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.368095 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" podStartSLOduration=3.929534892 podStartE2EDuration="32.368077884s" podCreationTimestamp="2026-01-28 18:46:47 +0000 UTC" firstStartedPulling="2026-01-28 18:46:49.189321398 +0000 UTC m=+1015.153504272" lastFinishedPulling="2026-01-28 18:47:17.62786439 +0000 UTC m=+1043.592047264" observedRunningTime="2026-01-28 18:47:19.334733091 +0000 UTC m=+1045.298915955" watchObservedRunningTime="2026-01-28 18:47:19.368077884 +0000 UTC m=+1045.332260758" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.403720 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg" podStartSLOduration=3.087023296 podStartE2EDuration="33.403701288s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:47.804656797 +0000 UTC m=+1013.768839661" lastFinishedPulling="2026-01-28 18:47:18.121334779 +0000 UTC m=+1044.085517653" observedRunningTime="2026-01-28 18:47:19.398769694 +0000 UTC m=+1045.362952568" watchObservedRunningTime="2026-01-28 18:47:19.403701288 +0000 UTC m=+1045.367884162" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.408467 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx" podStartSLOduration=5.41919085 podStartE2EDuration="33.408448846s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.939942416 +0000 UTC m=+1014.904125300" lastFinishedPulling="2026-01-28 18:47:16.929200392 +0000 UTC m=+1042.893383296" observedRunningTime="2026-01-28 18:47:19.375196276 +0000 UTC m=+1045.339379150" watchObservedRunningTime="2026-01-28 18:47:19.408448846 +0000 UTC m=+1045.372631720" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.430402 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq" podStartSLOduration=3.015975064 podStartE2EDuration="33.430385603s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:47.958685616 +0000 UTC m=+1013.922868490" lastFinishedPulling="2026-01-28 18:47:18.373096155 +0000 UTC m=+1044.337279029" observedRunningTime="2026-01-28 18:47:19.428943378 +0000 UTC m=+1045.393126252" watchObservedRunningTime="2026-01-28 18:47:19.430385603 +0000 UTC m=+1045.394568477" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.448611 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg" podStartSLOduration=10.70671018 podStartE2EDuration="32.448586322s" podCreationTimestamp="2026-01-28 18:46:47 +0000 UTC" firstStartedPulling="2026-01-28 18:46:49.186611553 +0000 UTC m=+1015.150794427" lastFinishedPulling="2026-01-28 18:47:10.928487695 +0000 UTC m=+1036.892670569" observedRunningTime="2026-01-28 18:47:19.445856557 +0000 UTC m=+1045.410039431" watchObservedRunningTime="2026-01-28 18:47:19.448586322 +0000 UTC m=+1045.412769196" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.536819 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" podStartSLOduration=32.536797652 podStartE2EDuration="32.536797652s" podCreationTimestamp="2026-01-28 18:46:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:47:19.528322527 +0000 UTC m=+1045.492505401" watchObservedRunningTime="2026-01-28 18:47:19.536797652 +0000 UTC m=+1045.500980526" Jan 28 18:47:19 crc kubenswrapper[4767]: I0128 18:47:19.841271 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m"] Jan 28 18:47:20 crc kubenswrapper[4767]: I0128 18:47:20.194309 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" event={"ID":"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d","Type":"ContainerStarted","Data":"e23135ab386ed571197beca901b6564ed733ba431452ff5d7501ec0deec675b5"} Jan 28 18:47:23 crc kubenswrapper[4767]: I0128 18:47:23.969358 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-77dc76679b-k6848" Jan 28 18:47:24 crc kubenswrapper[4767]: I0128 18:47:24.219542 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" event={"ID":"c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d","Type":"ContainerStarted","Data":"bee68dcd1c6a01efb69e4ab5c62d6c143abca9fc943d9793d257a6dbaa67c6d2"} Jan 28 18:47:24 crc kubenswrapper[4767]: I0128 18:47:24.219623 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:47:24 crc kubenswrapper[4767]: I0128 18:47:24.221554 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj" event={"ID":"58b8adeb-7f68-48a4-a8b3-5e93b6b93ec8","Type":"ContainerStarted","Data":"a10d68372535e27b78a17dffe5e4449db1a581d5be35ead01eabd99558c32ed6"} Jan 28 18:47:24 crc kubenswrapper[4767]: I0128 18:47:24.221828 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj" Jan 28 18:47:24 crc kubenswrapper[4767]: I0128 18:47:24.223271 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" event={"ID":"ac7dbf1c-e4ce-4b04-8723-6166810cdf9b","Type":"ContainerStarted","Data":"ac59b507aa22ad6db05e56d30e2b8dcb12486e752e1a8b951be6fc6053ef7e18"} Jan 28 18:47:24 crc kubenswrapper[4767]: I0128 18:47:24.223397 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:47:24 crc kubenswrapper[4767]: I0128 18:47:24.224835 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7" event={"ID":"4129a635-943e-4417-8934-24c408083149","Type":"ContainerStarted","Data":"516f4bd23ca35264ec452412ceaa8fb6e6fcb4863f7e8371e8357e677f1ed225"} Jan 28 18:47:24 crc kubenswrapper[4767]: I0128 18:47:24.225155 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7" Jan 28 18:47:24 crc kubenswrapper[4767]: I0128 18:47:24.238458 
Jan 28 18:47:24 crc kubenswrapper[4767]: I0128 18:47:24.286831 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" podStartSLOduration=33.319554645 podStartE2EDuration="38.286804819s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:47:18.168475683 +0000 UTC m=+1044.132658557" lastFinishedPulling="2026-01-28 18:47:23.135725857 +0000 UTC m=+1049.099908731" observedRunningTime="2026-01-28 18:47:24.278880621 +0000 UTC m=+1050.243063495" watchObservedRunningTime="2026-01-28 18:47:24.286804819 +0000 UTC m=+1050.250987693"
Jan 28 18:47:24 crc kubenswrapper[4767]: I0128 18:47:24.309287 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj" podStartSLOduration=3.701233214 podStartE2EDuration="38.309268642s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.561869178 +0000 UTC m=+1014.526052052" lastFinishedPulling="2026-01-28 18:47:23.169904606 +0000 UTC m=+1049.134087480" observedRunningTime="2026-01-28 18:47:24.305682109 +0000 UTC m=+1050.269864983" watchObservedRunningTime="2026-01-28 18:47:24.309268642 +0000 UTC m=+1050.273451516"
Jan 28 18:47:24 crc kubenswrapper[4767]: I0128 18:47:24.323439 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7" podStartSLOduration=3.451102787 podStartE2EDuration="38.323422934s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.264569006 +0000 UTC m=+1014.228751890" lastFinishedPulling="2026-01-28 18:47:23.136889173 +0000 UTC m=+1049.101072037" observedRunningTime="2026-01-28 18:47:24.322127593 +0000 UTC m=+1050.286310467" watchObservedRunningTime="2026-01-28 18:47:24.323422934 +0000 UTC m=+1050.287605798"
Jan 28 18:47:25 crc kubenswrapper[4767]: I0128 18:47:25.231860 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs" event={"ID":"a5d0d7c1-8591-4619-912c-8db740ebd050","Type":"ContainerStarted","Data":"9668e5793e17d5b64331e88284f9635f3315a932717ff0b578ddaacf2693a544"}
Jan 28 18:47:25 crc kubenswrapper[4767]: I0128 18:47:25.232076 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs"
Jan 28 18:47:25 crc kubenswrapper[4767]: I0128 18:47:25.233479 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2" event={"ID":"83eb3c39-bbf1-4059-ae27-c3a8aac5ad69","Type":"ContainerStarted","Data":"1200c351e21a9e4f7cc5afcb7303351de6ac2c2c5c4f2e50561481a57566a74f"}
Jan 28 18:47:25 crc kubenswrapper[4767]: I0128
18:47:25.286439 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs" podStartSLOduration=3.045843525 podStartE2EDuration="38.286421072s" podCreationTimestamp="2026-01-28 18:46:47 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.956717831 +0000 UTC m=+1014.920900705" lastFinishedPulling="2026-01-28 18:47:24.197295378 +0000 UTC m=+1050.161478252" observedRunningTime="2026-01-28 18:47:25.250634493 +0000 UTC m=+1051.214817367" watchObservedRunningTime="2026-01-28 18:47:25.286421072 +0000 UTC m=+1051.250603946" Jan 28 18:47:25 crc kubenswrapper[4767]: I0128 18:47:25.290323 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-xjfz2" podStartSLOduration=3.155144104 podStartE2EDuration="38.290297893s" podCreationTimestamp="2026-01-28 18:46:47 +0000 UTC" firstStartedPulling="2026-01-28 18:46:49.211529823 +0000 UTC m=+1015.175712687" lastFinishedPulling="2026-01-28 18:47:24.346683602 +0000 UTC m=+1050.310866476" observedRunningTime="2026-01-28 18:47:25.284387909 +0000 UTC m=+1051.248570793" watchObservedRunningTime="2026-01-28 18:47:25.290297893 +0000 UTC m=+1051.254480777" Jan 28 18:47:26 crc kubenswrapper[4767]: I0128 18:47:26.925946 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-6bc7f4f4cf-xctgp" Jan 28 18:47:26 crc kubenswrapper[4767]: I0128 18:47:26.966008 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-f6487bd57-9hvzg" Jan 28 18:47:26 crc kubenswrapper[4767]: I0128 18:47:26.988370 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-66dfbd6f5d-x8rlq" Jan 28 18:47:27 crc kubenswrapper[4767]: I0128 18:47:27.018791 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-6db5dbd896-kb8qp" Jan 28 18:47:27 crc kubenswrapper[4767]: I0128 18:47:27.190900 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-587c6bfdcf-bhkrp" Jan 28 18:47:27 crc kubenswrapper[4767]: I0128 18:47:27.426504 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-958664b5-4prpg" Jan 28 18:47:27 crc kubenswrapper[4767]: I0128 18:47:27.564476 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-765668569f-98mkp" Jan 28 18:47:27 crc kubenswrapper[4767]: I0128 18:47:27.607154 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-67bf948998-222q9" Jan 28 18:47:27 crc kubenswrapper[4767]: I0128 18:47:27.749253 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-5c765b4558-v7nnq" Jan 28 18:47:27 crc kubenswrapper[4767]: I0128 18:47:27.825521 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-788c46999f-hq5gx" Jan 28 18:47:27 crc kubenswrapper[4767]: I0128 18:47:27.971096 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack-operators/test-operator-controller-manager-56f8bfcd9f-tkgjs" Jan 28 18:47:28 crc kubenswrapper[4767]: I0128 18:47:28.023513 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-68fc8c869-w4dzg" Jan 28 18:47:28 crc kubenswrapper[4767]: I0128 18:47:28.051775 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-79f6f75b9c-dhf5c" Jan 28 18:47:28 crc kubenswrapper[4767]: I0128 18:47:28.315626 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-767b8bc766-wvg8s" Jan 28 18:47:29 crc kubenswrapper[4767]: I0128 18:47:29.223066 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-79955696d6-gkt4m" Jan 28 18:47:33 crc kubenswrapper[4767]: I0128 18:47:33.778725 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd" Jan 28 18:47:37 crc kubenswrapper[4767]: I0128 18:47:37.341420 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5fb775575f-qcsg7" Jan 28 18:47:37 crc kubenswrapper[4767]: I0128 18:47:37.627554 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-694c5bfc85-vsdzj" Jan 28 18:47:37 crc kubenswrapper[4767]: I0128 18:47:37.968735 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5b964cf4cd-b52rs" Jan 28 18:47:42 crc kubenswrapper[4767]: I0128 18:47:42.374841 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5" event={"ID":"a0962425-fb2a-4acc-966b-544669cd2dc6","Type":"ContainerStarted","Data":"708780c7439b32c5b0e3e89a1a0de8a2142ae7937a0a38efdf58f9e071f50ede"} Jan 28 18:47:42 crc kubenswrapper[4767]: I0128 18:47:42.375740 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5" Jan 28 18:47:42 crc kubenswrapper[4767]: I0128 18:47:42.376371 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-6978b79747-l4575" event={"ID":"898fb09e-f084-45e3-88bd-7a67ef198bee","Type":"ContainerStarted","Data":"45ec0410a8346090d562f15f057dc2f70ceadfc192865c556d09166a354b3db9"} Jan 28 18:47:42 crc kubenswrapper[4767]: I0128 18:47:42.376552 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-6978b79747-l4575" Jan 28 18:47:42 crc kubenswrapper[4767]: I0128 18:47:42.393225 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5" podStartSLOduration=3.735710322 podStartE2EDuration="56.393189194s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.933266507 +0000 UTC m=+1014.897449381" lastFinishedPulling="2026-01-28 18:47:41.590745379 +0000 UTC m=+1067.554928253" observedRunningTime="2026-01-28 18:47:42.390536061 +0000 UTC m=+1068.354718945" watchObservedRunningTime="2026-01-28 
18:47:42.393189194 +0000 UTC m=+1068.357372068" Jan 28 18:47:42 crc kubenswrapper[4767]: I0128 18:47:42.407109 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-6978b79747-l4575" podStartSLOduration=3.231340553 podStartE2EDuration="56.407087169s" podCreationTimestamp="2026-01-28 18:46:46 +0000 UTC" firstStartedPulling="2026-01-28 18:46:48.414050313 +0000 UTC m=+1014.378233187" lastFinishedPulling="2026-01-28 18:47:41.589796929 +0000 UTC m=+1067.553979803" observedRunningTime="2026-01-28 18:47:42.407083658 +0000 UTC m=+1068.371266532" watchObservedRunningTime="2026-01-28 18:47:42.407087169 +0000 UTC m=+1068.371270043" Jan 28 18:47:47 crc kubenswrapper[4767]: I0128 18:47:47.471578 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-6978b79747-l4575" Jan 28 18:47:47 crc kubenswrapper[4767]: I0128 18:47:47.701968 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-ddcbfd695-mrhz5" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.612166 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-wdmbx"] Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.614023 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.617145 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-6d9dp" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.617244 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.617437 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.617476 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.631019 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-wdmbx"] Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.668470 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-925qz"] Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.670558 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-925qz" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.679716 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.685200 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-925qz"] Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.742655 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vrbbs\" (UniqueName: \"kubernetes.io/projected/6dcf2967-51f6-4489-8f2d-3028e5d3b808-kube-api-access-vrbbs\") pod \"dnsmasq-dns-675f4bcbfc-wdmbx\" (UID: \"6dcf2967-51f6-4489-8f2d-3028e5d3b808\") " pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.742854 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6dcf2967-51f6-4489-8f2d-3028e5d3b808-config\") pod \"dnsmasq-dns-675f4bcbfc-wdmbx\" (UID: \"6dcf2967-51f6-4489-8f2d-3028e5d3b808\") " pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.844816 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vrbbs\" (UniqueName: \"kubernetes.io/projected/6dcf2967-51f6-4489-8f2d-3028e5d3b808-kube-api-access-vrbbs\") pod \"dnsmasq-dns-675f4bcbfc-wdmbx\" (UID: \"6dcf2967-51f6-4489-8f2d-3028e5d3b808\") " pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.844988 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6dcf2967-51f6-4489-8f2d-3028e5d3b808-config\") pod \"dnsmasq-dns-675f4bcbfc-wdmbx\" (UID: \"6dcf2967-51f6-4489-8f2d-3028e5d3b808\") " pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.845043 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzfgk\" (UniqueName: \"kubernetes.io/projected/80a523d6-89d8-4bcb-a7d0-1ab05465897d-kube-api-access-tzfgk\") pod \"dnsmasq-dns-78dd6ddcc-925qz\" (UID: \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-925qz" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.845205 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80a523d6-89d8-4bcb-a7d0-1ab05465897d-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-925qz\" (UID: \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-925qz" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.845503 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a523d6-89d8-4bcb-a7d0-1ab05465897d-config\") pod \"dnsmasq-dns-78dd6ddcc-925qz\" (UID: \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-925qz" Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.846283 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6dcf2967-51f6-4489-8f2d-3028e5d3b808-config\") pod \"dnsmasq-dns-675f4bcbfc-wdmbx\" (UID: \"6dcf2967-51f6-4489-8f2d-3028e5d3b808\") " pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx" Jan 28 
18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.867461 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vrbbs\" (UniqueName: \"kubernetes.io/projected/6dcf2967-51f6-4489-8f2d-3028e5d3b808-kube-api-access-vrbbs\") pod \"dnsmasq-dns-675f4bcbfc-wdmbx\" (UID: \"6dcf2967-51f6-4489-8f2d-3028e5d3b808\") " pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx"
Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.932640 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx"
Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.947022 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzfgk\" (UniqueName: \"kubernetes.io/projected/80a523d6-89d8-4bcb-a7d0-1ab05465897d-kube-api-access-tzfgk\") pod \"dnsmasq-dns-78dd6ddcc-925qz\" (UID: \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-925qz"
Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.947111 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80a523d6-89d8-4bcb-a7d0-1ab05465897d-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-925qz\" (UID: \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-925qz"
Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.947155 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a523d6-89d8-4bcb-a7d0-1ab05465897d-config\") pod \"dnsmasq-dns-78dd6ddcc-925qz\" (UID: \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-925qz"
Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.948320 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a523d6-89d8-4bcb-a7d0-1ab05465897d-config\") pod \"dnsmasq-dns-78dd6ddcc-925qz\" (UID: \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-925qz"
Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.949028 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80a523d6-89d8-4bcb-a7d0-1ab05465897d-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-925qz\" (UID: \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-925qz"
Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.978166 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzfgk\" (UniqueName: \"kubernetes.io/projected/80a523d6-89d8-4bcb-a7d0-1ab05465897d-kube-api-access-tzfgk\") pod \"dnsmasq-dns-78dd6ddcc-925qz\" (UID: \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\") " pod="openstack/dnsmasq-dns-78dd6ddcc-925qz"
Jan 28 18:48:03 crc kubenswrapper[4767]: I0128 18:48:03.990447 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-925qz"
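
Each volume above moves through three logged phases: reconciler_common.go:245 confirms the attach (operationExecutor.VerifyControllerAttachedVolume started), reconciler_common.go:218 begins the mount (operationExecutor.MountVolume started), and operation_generator.go:637 reports MountVolume.SetUp succeeded; only after the last phase for every volume can the pod sandbox start. A small Python sketch (a hypothetical checker, not kubelet code) that verifies a named volume hit all three phases, matching the escaped \"...\" quoting exactly as printed in these entries and assuming one entry per line:

    import re

    # The three phases, in the order the kubelet logs them.
    PHASES = [
        "operationExecutor.VerifyControllerAttachedVolume started",
        "operationExecutor.MountVolume started",
        "MountVolume.SetUp succeeded",
    ]
    # Volume names appear with escaped quotes, e.g.: for volume \"dns-svc\"
    VOL = re.compile(r'for volume \\"([^"\\]+)\\"')

    def phases_seen(log_lines, volume):
        """Return the phases logged for `volume`, in first-appearance order."""
        seen = []
        for line in log_lines:
            m = VOL.search(line)
            if m and m.group(1) == volume:
                for phase in PHASES:
                    if phase in line and phase not in seen:
                        seen.append(phase)
        return seen

A real tool would key on the pod UID as well, since several pods here reuse the same volume names (config, dns-svc, kube-api-access-*).
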
Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-925qz" Jan 28 18:48:04 crc kubenswrapper[4767]: I0128 18:48:04.336927 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-925qz"] Jan 28 18:48:04 crc kubenswrapper[4767]: I0128 18:48:04.417521 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-wdmbx"] Jan 28 18:48:04 crc kubenswrapper[4767]: W0128 18:48:04.421983 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6dcf2967_51f6_4489_8f2d_3028e5d3b808.slice/crio-d3ff9d8a2d7eaabe9a02b01e3ff26c54d573592fb260723296e5ddda28b7ff3b WatchSource:0}: Error finding container d3ff9d8a2d7eaabe9a02b01e3ff26c54d573592fb260723296e5ddda28b7ff3b: Status 404 returned error can't find the container with id d3ff9d8a2d7eaabe9a02b01e3ff26c54d573592fb260723296e5ddda28b7ff3b Jan 28 18:48:04 crc kubenswrapper[4767]: I0128 18:48:04.525576 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-925qz" event={"ID":"80a523d6-89d8-4bcb-a7d0-1ab05465897d","Type":"ContainerStarted","Data":"0ff2c26512741a9475ede3605d0f16901115dbec9c101605b7b5bbfbace3ac8a"} Jan 28 18:48:04 crc kubenswrapper[4767]: I0128 18:48:04.527379 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx" event={"ID":"6dcf2967-51f6-4489-8f2d-3028e5d3b808","Type":"ContainerStarted","Data":"d3ff9d8a2d7eaabe9a02b01e3ff26c54d573592fb260723296e5ddda28b7ff3b"} Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.393107 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-wdmbx"] Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.429025 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-j6m9p"] Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.430573 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.438930 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-j6m9p"] Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.600521 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb9ed350-6e31-4065-992e-e10644be1c7d-dns-svc\") pod \"dnsmasq-dns-666b6646f7-j6m9p\" (UID: \"cb9ed350-6e31-4065-992e-e10644be1c7d\") " pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.600569 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4tsz\" (UniqueName: \"kubernetes.io/projected/cb9ed350-6e31-4065-992e-e10644be1c7d-kube-api-access-b4tsz\") pod \"dnsmasq-dns-666b6646f7-j6m9p\" (UID: \"cb9ed350-6e31-4065-992e-e10644be1c7d\") " pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.600599 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb9ed350-6e31-4065-992e-e10644be1c7d-config\") pod \"dnsmasq-dns-666b6646f7-j6m9p\" (UID: \"cb9ed350-6e31-4065-992e-e10644be1c7d\") " pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.703443 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb9ed350-6e31-4065-992e-e10644be1c7d-dns-svc\") pod \"dnsmasq-dns-666b6646f7-j6m9p\" (UID: \"cb9ed350-6e31-4065-992e-e10644be1c7d\") " pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.703511 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4tsz\" (UniqueName: \"kubernetes.io/projected/cb9ed350-6e31-4065-992e-e10644be1c7d-kube-api-access-b4tsz\") pod \"dnsmasq-dns-666b6646f7-j6m9p\" (UID: \"cb9ed350-6e31-4065-992e-e10644be1c7d\") " pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.703553 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb9ed350-6e31-4065-992e-e10644be1c7d-config\") pod \"dnsmasq-dns-666b6646f7-j6m9p\" (UID: \"cb9ed350-6e31-4065-992e-e10644be1c7d\") " pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.704871 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb9ed350-6e31-4065-992e-e10644be1c7d-config\") pod \"dnsmasq-dns-666b6646f7-j6m9p\" (UID: \"cb9ed350-6e31-4065-992e-e10644be1c7d\") " pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.707627 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb9ed350-6e31-4065-992e-e10644be1c7d-dns-svc\") pod \"dnsmasq-dns-666b6646f7-j6m9p\" (UID: \"cb9ed350-6e31-4065-992e-e10644be1c7d\") " pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.725314 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4tsz\" (UniqueName: 
\"kubernetes.io/projected/cb9ed350-6e31-4065-992e-e10644be1c7d-kube-api-access-b4tsz\") pod \"dnsmasq-dns-666b6646f7-j6m9p\" (UID: \"cb9ed350-6e31-4065-992e-e10644be1c7d\") " pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.774596 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.872094 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-925qz"] Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.920714 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nbvnb"] Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.928872 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" Jan 28 18:48:06 crc kubenswrapper[4767]: I0128 18:48:06.939101 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nbvnb"] Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.020001 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-config\") pod \"dnsmasq-dns-57d769cc4f-nbvnb\" (UID: \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\") " pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.020092 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-nbvnb\" (UID: \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\") " pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.020173 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hx7lw\" (UniqueName: \"kubernetes.io/projected/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-kube-api-access-hx7lw\") pod \"dnsmasq-dns-57d769cc4f-nbvnb\" (UID: \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\") " pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.121343 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hx7lw\" (UniqueName: \"kubernetes.io/projected/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-kube-api-access-hx7lw\") pod \"dnsmasq-dns-57d769cc4f-nbvnb\" (UID: \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\") " pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.121421 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-config\") pod \"dnsmasq-dns-57d769cc4f-nbvnb\" (UID: \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\") " pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.121473 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-nbvnb\" (UID: \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\") " pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.122622 4767 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-nbvnb\" (UID: \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\") " pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.125160 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-config\") pod \"dnsmasq-dns-57d769cc4f-nbvnb\" (UID: \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\") " pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.139115 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hx7lw\" (UniqueName: \"kubernetes.io/projected/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-kube-api-access-hx7lw\") pod \"dnsmasq-dns-57d769cc4f-nbvnb\" (UID: \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\") " pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.262421 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.351981 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-j6m9p"] Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.582782 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" event={"ID":"cb9ed350-6e31-4065-992e-e10644be1c7d","Type":"ContainerStarted","Data":"545238ffd630a1f772281f31b612d03ec38fed325fffbb1ad8531333db627529"} Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.677379 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.678616 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.687998 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.688153 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-2tqzw" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.688286 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.688351 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.688457 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.688529 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.694790 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.706426 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.836194 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.836318 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krbbr\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-kube-api-access-krbbr\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.836342 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.836374 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.836398 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bbea8b85-5bb2-4570-83e7-07dafaade001-pod-info\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.836569 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-config-data\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.836632 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bbea8b85-5bb2-4570-83e7-07dafaade001-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.836665 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.836697 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-server-conf\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.836730 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.836768 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.870171 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nbvnb"] Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.938493 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krbbr\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-kube-api-access-krbbr\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.938547 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.938617 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.938662 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" 
(UniqueName: \"kubernetes.io/downward-api/bbea8b85-5bb2-4570-83e7-07dafaade001-pod-info\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.939139 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.938700 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-config-data\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.939285 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bbea8b85-5bb2-4570-83e7-07dafaade001-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.939313 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.939338 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-server-conf\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.939360 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.939394 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.939432 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.939571 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.939591 4767 
operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.939761 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-config-data\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.940115 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.940753 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-server-conf\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.955682 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.977724 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.978022 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bbea8b85-5bb2-4570-83e7-07dafaade001-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.993002 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krbbr\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-kube-api-access-krbbr\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:07 crc kubenswrapper[4767]: I0128 18:48:07.996632 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bbea8b85-5bb2-4570-83e7-07dafaade001-pod-info\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.100338 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " pod="openstack/rabbitmq-server-0" Jan 28 18:48:08 crc 
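
Local-volume PVs take one extra step that the configmap, secret, projected, and downward-API volumes above do not: MountVolume.MountDevice (operation_generator.go:580) first stages the device at a node-global path, /mnt/openstack/pv02 for local-storage02-crc here and /mnt/openstack/pv05 for local-storage05-crc below, and only then does MountVolume.SetUp expose the staged mount to the pod. A throwaway Python sketch, under the same assumptions as above (escaped quoting, one entry per line), that collects the reported device paths:

    import re

    # Map each local-storage PV to the "device mount path" logged by
    # MountVolume.MountDevice, e.g. local-storage02-crc -> /mnt/openstack/pv02.
    DEV = re.compile(
        r'MountVolume\.MountDevice succeeded for volume \\"(local-storage[^"\\]*)\\"'
        r'.*?device mount path \\"([^"\\]+)\\"'
    )

    def local_pv_device_paths(log_text):
        return dict(DEV.findall(log_text))
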
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.108644 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.108887 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.113373 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.113761 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.113983 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-h7ff2"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.114219 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.114441 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.114602 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.116610 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.254455 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.254534 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.254668 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.254754 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.254781 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlc8r\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-kube-api-access-vlc8r\") pod
\"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.254921 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.255120 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.255185 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.255319 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.255350 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.255371 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.318803 4767 util.go:30] "No sandbox for pod can be found. 
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.361477 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.361587 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.361612 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlc8r\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-kube-api-access-vlc8r\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.361952 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.362117 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.364866 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.364933 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.365012 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.365041 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0"
Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.365057
4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.365138 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.365172 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.365594 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.365674 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.366502 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.367461 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.371168 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.372404 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.376665 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-server-conf\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.380564 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.380803 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.402987 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlc8r\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-kube-api-access-vlc8r\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.434493 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.457132 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:48:08 crc kubenswrapper[4767]: I0128 18:48:08.589967 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" event={"ID":"bac6b5c2-50a2-4af9-a1d0-2542da258fe4","Type":"ContainerStarted","Data":"0f72093db8853c5c95cc218a9b0ec6352332c92396960b4a3a432b88f1aa6966"} Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.158238 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.197637 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.199465 4767 util.go:30] "No sandbox for pod can be found. 
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.213060 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.213368 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-rdvvv"
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.213366 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.213522 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.214659 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.232724 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.293167 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.294365 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/837cd3ca-8015-435a-b908-0b46125d68ae-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0"
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.294571 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/837cd3ca-8015-435a-b908-0b46125d68ae-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0"
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.294659 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/837cd3ca-8015-435a-b908-0b46125d68ae-operator-scripts\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0"
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.294730 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/837cd3ca-8015-435a-b908-0b46125d68ae-config-data-default\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0"
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.294767 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/837cd3ca-8015-435a-b908-0b46125d68ae-kolla-config\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0"
Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.295008 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/837cd3ca-8015-435a-b908-0b46125d68ae-config-data-generated\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " 
pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.295074 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.295139 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8rc96\" (UniqueName: \"kubernetes.io/projected/837cd3ca-8015-435a-b908-0b46125d68ae-kube-api-access-8rc96\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: W0128 18:48:09.342904 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbbea8b85_5bb2_4570_83e7_07dafaade001.slice/crio-eac80ddb3c1ad774281824ad7d736f1e5e935cbefe4754a2226078a580883179 WatchSource:0}: Error finding container eac80ddb3c1ad774281824ad7d736f1e5e935cbefe4754a2226078a580883179: Status 404 returned error can't find the container with id eac80ddb3c1ad774281824ad7d736f1e5e935cbefe4754a2226078a580883179 Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.396105 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/837cd3ca-8015-435a-b908-0b46125d68ae-config-data-default\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.396155 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/837cd3ca-8015-435a-b908-0b46125d68ae-kolla-config\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.396241 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/837cd3ca-8015-435a-b908-0b46125d68ae-config-data-generated\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.396275 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.396320 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8rc96\" (UniqueName: \"kubernetes.io/projected/837cd3ca-8015-435a-b908-0b46125d68ae-kube-api-access-8rc96\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.396352 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/837cd3ca-8015-435a-b908-0b46125d68ae-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " 
pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.396406 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/837cd3ca-8015-435a-b908-0b46125d68ae-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.396438 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/837cd3ca-8015-435a-b908-0b46125d68ae-operator-scripts\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.397532 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/837cd3ca-8015-435a-b908-0b46125d68ae-config-data-default\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.397942 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.398573 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/837cd3ca-8015-435a-b908-0b46125d68ae-operator-scripts\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.398584 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/837cd3ca-8015-435a-b908-0b46125d68ae-kolla-config\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.398863 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/837cd3ca-8015-435a-b908-0b46125d68ae-config-data-generated\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.405931 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/837cd3ca-8015-435a-b908-0b46125d68ae-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.416308 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8rc96\" (UniqueName: \"kubernetes.io/projected/837cd3ca-8015-435a-b908-0b46125d68ae-kube-api-access-8rc96\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.425950 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/837cd3ca-8015-435a-b908-0b46125d68ae-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.428439 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-galera-0\" (UID: \"837cd3ca-8015-435a-b908-0b46125d68ae\") " pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.570472 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.619777 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd","Type":"ContainerStarted","Data":"d3b8598d2dbb8ad488a8d5ce6e54f8a171ee14c28da972e5bfd2b3ed853c1ac2"} Jan 28 18:48:09 crc kubenswrapper[4767]: I0128 18:48:09.621763 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"bbea8b85-5bb2-4570-83e7-07dafaade001","Type":"ContainerStarted","Data":"eac80ddb3c1ad774281824ad7d736f1e5e935cbefe4754a2226078a580883179"} Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.299053 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.376857 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.383953 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.389140 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-lv8rv" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.390118 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.390790 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.391276 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.401954 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.516797 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7llls\" (UniqueName: \"kubernetes.io/projected/ae717a60-f106-49ec-abaa-3be941f7f907-kube-api-access-7llls\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.516883 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.516915 4767 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ae717a60-f106-49ec-abaa-3be941f7f907-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.516962 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ae717a60-f106-49ec-abaa-3be941f7f907-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.516987 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae717a60-f106-49ec-abaa-3be941f7f907-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.517056 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ae717a60-f106-49ec-abaa-3be941f7f907-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.517083 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae717a60-f106-49ec-abaa-3be941f7f907-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.517159 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae717a60-f106-49ec-abaa-3be941f7f907-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.618576 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae717a60-f106-49ec-abaa-3be941f7f907-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.618659 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7llls\" (UniqueName: \"kubernetes.io/projected/ae717a60-f106-49ec-abaa-3be941f7f907-kube-api-access-7llls\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.618695 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 
18:48:10.618721 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ae717a60-f106-49ec-abaa-3be941f7f907-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.618747 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ae717a60-f106-49ec-abaa-3be941f7f907-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.618777 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae717a60-f106-49ec-abaa-3be941f7f907-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.618817 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ae717a60-f106-49ec-abaa-3be941f7f907-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.618844 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae717a60-f106-49ec-abaa-3be941f7f907-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.623773 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/ae717a60-f106-49ec-abaa-3be941f7f907-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.624001 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/ae717a60-f106-49ec-abaa-3be941f7f907-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.624078 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.624746 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ae717a60-f106-49ec-abaa-3be941f7f907-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.626775 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"config-data-default\" (UniqueName: \"kubernetes.io/configmap/ae717a60-f106-49ec-abaa-3be941f7f907-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.641177 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ae717a60-f106-49ec-abaa-3be941f7f907-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.641777 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/ae717a60-f106-49ec-abaa-3be941f7f907-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.671524 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7llls\" (UniqueName: \"kubernetes.io/projected/ae717a60-f106-49ec-abaa-3be941f7f907-kube-api-access-7llls\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.728389 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"openstack-cell1-galera-0\" (UID: \"ae717a60-f106-49ec-abaa-3be941f7f907\") " pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.852497 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.857634 4767 util.go:30] "No sandbox for pod can be found. 
Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.868436 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.868713 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-pbf24"
Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.868822 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.877889 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.927463 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/07049746-7fe4-45ba-9a83-201db13c3de0-kolla-config\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0"
Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.927517 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/07049746-7fe4-45ba-9a83-201db13c3de0-memcached-tls-certs\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0"
Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.927542 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-597ks\" (UniqueName: \"kubernetes.io/projected/07049746-7fe4-45ba-9a83-201db13c3de0-kube-api-access-597ks\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0"
Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.927692 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07049746-7fe4-45ba-9a83-201db13c3de0-combined-ca-bundle\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0"
Jan 28 18:48:10 crc kubenswrapper[4767]: I0128 18:48:10.927908 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/07049746-7fe4-45ba-9a83-201db13c3de0-config-data\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0"
Jan 28 18:48:11 crc kubenswrapper[4767]: I0128 18:48:11.025093 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:11 crc kubenswrapper[4767]: I0128 18:48:11.029729 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-597ks\" (UniqueName: \"kubernetes.io/projected/07049746-7fe4-45ba-9a83-201db13c3de0-kube-api-access-597ks\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0" Jan 28 18:48:11 crc kubenswrapper[4767]: I0128 18:48:11.029788 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07049746-7fe4-45ba-9a83-201db13c3de0-combined-ca-bundle\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0" Jan 28 18:48:11 crc kubenswrapper[4767]: I0128 18:48:11.029864 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/07049746-7fe4-45ba-9a83-201db13c3de0-config-data\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0" Jan 28 18:48:11 crc kubenswrapper[4767]: I0128 18:48:11.029890 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/07049746-7fe4-45ba-9a83-201db13c3de0-kolla-config\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0" Jan 28 18:48:11 crc kubenswrapper[4767]: I0128 18:48:11.029919 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/07049746-7fe4-45ba-9a83-201db13c3de0-memcached-tls-certs\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0" Jan 28 18:48:11 crc kubenswrapper[4767]: I0128 18:48:11.031197 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/07049746-7fe4-45ba-9a83-201db13c3de0-config-data\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0" Jan 28 18:48:11 crc kubenswrapper[4767]: I0128 18:48:11.032022 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/07049746-7fe4-45ba-9a83-201db13c3de0-kolla-config\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0" Jan 28 18:48:11 crc kubenswrapper[4767]: I0128 18:48:11.036342 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/07049746-7fe4-45ba-9a83-201db13c3de0-combined-ca-bundle\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0" Jan 28 18:48:11 crc kubenswrapper[4767]: I0128 18:48:11.053433 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/07049746-7fe4-45ba-9a83-201db13c3de0-memcached-tls-certs\") pod \"memcached-0\" (UID: \"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0" Jan 28 18:48:11 crc kubenswrapper[4767]: I0128 18:48:11.082441 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-597ks\" (UniqueName: \"kubernetes.io/projected/07049746-7fe4-45ba-9a83-201db13c3de0-kube-api-access-597ks\") pod \"memcached-0\" (UID: 
\"07049746-7fe4-45ba-9a83-201db13c3de0\") " pod="openstack/memcached-0" Jan 28 18:48:11 crc kubenswrapper[4767]: I0128 18:48:11.199699 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 28 18:48:12 crc kubenswrapper[4767]: I0128 18:48:12.769980 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 18:48:12 crc kubenswrapper[4767]: I0128 18:48:12.771225 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 18:48:12 crc kubenswrapper[4767]: I0128 18:48:12.781676 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 18:48:12 crc kubenswrapper[4767]: I0128 18:48:12.785619 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-tc6k7" Jan 28 18:48:12 crc kubenswrapper[4767]: I0128 18:48:12.873163 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nsm6\" (UniqueName: \"kubernetes.io/projected/2c76158e-431b-4588-962f-0ac42843de29-kube-api-access-5nsm6\") pod \"kube-state-metrics-0\" (UID: \"2c76158e-431b-4588-962f-0ac42843de29\") " pod="openstack/kube-state-metrics-0" Jan 28 18:48:12 crc kubenswrapper[4767]: I0128 18:48:12.979799 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nsm6\" (UniqueName: \"kubernetes.io/projected/2c76158e-431b-4588-962f-0ac42843de29-kube-api-access-5nsm6\") pod \"kube-state-metrics-0\" (UID: \"2c76158e-431b-4588-962f-0ac42843de29\") " pod="openstack/kube-state-metrics-0" Jan 28 18:48:13 crc kubenswrapper[4767]: I0128 18:48:13.016311 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nsm6\" (UniqueName: \"kubernetes.io/projected/2c76158e-431b-4588-962f-0ac42843de29-kube-api-access-5nsm6\") pod \"kube-state-metrics-0\" (UID: \"2c76158e-431b-4588-962f-0ac42843de29\") " pod="openstack/kube-state-metrics-0" Jan 28 18:48:13 crc kubenswrapper[4767]: I0128 18:48:13.104660 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.315427 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-dq7h8"] Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.321704 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.326653 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.326760 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-fx8rx" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.327074 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.331263 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dq7h8"] Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.371353 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-8cjhn"] Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.375681 4767 util.go:30] "No sandbox for pod can be found. 
Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.396161 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-8cjhn"]
Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.441619 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6px5\" (UniqueName: \"kubernetes.io/projected/79640773-c4bb-4add-83a8-f9a39873bdef-kube-api-access-g6px5\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8"
Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.441696 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79640773-c4bb-4add-83a8-f9a39873bdef-combined-ca-bundle\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8"
Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.441778 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/79640773-c4bb-4add-83a8-f9a39873bdef-scripts\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8"
Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.441832 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/79640773-c4bb-4add-83a8-f9a39873bdef-ovn-controller-tls-certs\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8"
Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.441857 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/79640773-c4bb-4add-83a8-f9a39873bdef-var-log-ovn\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8"
Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.441902 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/79640773-c4bb-4add-83a8-f9a39873bdef-var-run-ovn\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8"
Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.441938 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/79640773-c4bb-4add-83a8-f9a39873bdef-var-run\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8"
Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.543993 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ee92efdf-106a-4ca0-8e46-8122b820a0d1-etc-ovs\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn"
Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.544068 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/79640773-c4bb-4add-83a8-f9a39873bdef-combined-ca-bundle\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.544275 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ee92efdf-106a-4ca0-8e46-8122b820a0d1-var-log\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.544323 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ee92efdf-106a-4ca0-8e46-8122b820a0d1-var-run\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.544354 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ee92efdf-106a-4ca0-8e46-8122b820a0d1-var-lib\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.544389 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/79640773-c4bb-4add-83a8-f9a39873bdef-scripts\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.544446 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee92efdf-106a-4ca0-8e46-8122b820a0d1-scripts\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.544483 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/79640773-c4bb-4add-83a8-f9a39873bdef-ovn-controller-tls-certs\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.544507 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/79640773-c4bb-4add-83a8-f9a39873bdef-var-log-ovn\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.544557 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v6j4\" (UniqueName: \"kubernetes.io/projected/ee92efdf-106a-4ca0-8e46-8122b820a0d1-kube-api-access-2v6j4\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.544590 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/79640773-c4bb-4add-83a8-f9a39873bdef-var-run-ovn\") pod \"ovn-controller-dq7h8\" 
(UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.544642 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/79640773-c4bb-4add-83a8-f9a39873bdef-var-run\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.544710 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6px5\" (UniqueName: \"kubernetes.io/projected/79640773-c4bb-4add-83a8-f9a39873bdef-kube-api-access-g6px5\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.546142 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/79640773-c4bb-4add-83a8-f9a39873bdef-var-log-ovn\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.546259 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/79640773-c4bb-4add-83a8-f9a39873bdef-var-run-ovn\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.546448 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/79640773-c4bb-4add-83a8-f9a39873bdef-var-run\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.547862 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/79640773-c4bb-4add-83a8-f9a39873bdef-scripts\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.553258 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/79640773-c4bb-4add-83a8-f9a39873bdef-ovn-controller-tls-certs\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.553191 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79640773-c4bb-4add-83a8-f9a39873bdef-combined-ca-bundle\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.569332 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6px5\" (UniqueName: \"kubernetes.io/projected/79640773-c4bb-4add-83a8-f9a39873bdef-kube-api-access-g6px5\") pod \"ovn-controller-dq7h8\" (UID: \"79640773-c4bb-4add-83a8-f9a39873bdef\") " pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.647901 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: 
\"kubernetes.io/host-path/ee92efdf-106a-4ca0-8e46-8122b820a0d1-etc-ovs\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.647980 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ee92efdf-106a-4ca0-8e46-8122b820a0d1-var-log\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.648016 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ee92efdf-106a-4ca0-8e46-8122b820a0d1-var-run\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.648039 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ee92efdf-106a-4ca0-8e46-8122b820a0d1-var-lib\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.648088 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee92efdf-106a-4ca0-8e46-8122b820a0d1-scripts\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.648149 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v6j4\" (UniqueName: \"kubernetes.io/projected/ee92efdf-106a-4ca0-8e46-8122b820a0d1-kube-api-access-2v6j4\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.648776 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ee92efdf-106a-4ca0-8e46-8122b820a0d1-var-run\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.648943 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ee92efdf-106a-4ca0-8e46-8122b820a0d1-var-lib\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.649074 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ee92efdf-106a-4ca0-8e46-8122b820a0d1-etc-ovs\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.649304 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ee92efdf-106a-4ca0-8e46-8122b820a0d1-var-log\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 
18:48:16.652742 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ee92efdf-106a-4ca0-8e46-8122b820a0d1-scripts\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.652745 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.667891 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2v6j4\" (UniqueName: \"kubernetes.io/projected/ee92efdf-106a-4ca0-8e46-8122b820a0d1-kube-api-access-2v6j4\") pod \"ovn-controller-ovs-8cjhn\" (UID: \"ee92efdf-106a-4ca0-8e46-8122b820a0d1\") " pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:16 crc kubenswrapper[4767]: I0128 18:48:16.696345 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.199567 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.201766 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.205147 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.205502 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-qknrr" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.206296 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.213144 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.213433 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.214158 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.362165 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.362531 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fdcc5ee-189a-4c1e-a652-209015b14ac9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.362584 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fdcc5ee-189a-4c1e-a652-209015b14ac9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 
28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.362621 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8fdcc5ee-189a-4c1e-a652-209015b14ac9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.362662 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8fdcc5ee-189a-4c1e-a652-209015b14ac9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.362685 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fdcc5ee-189a-4c1e-a652-209015b14ac9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.362910 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fdcc5ee-189a-4c1e-a652-209015b14ac9-config\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.363104 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfj7g\" (UniqueName: \"kubernetes.io/projected/8fdcc5ee-189a-4c1e-a652-209015b14ac9-kube-api-access-tfj7g\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.465350 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfj7g\" (UniqueName: \"kubernetes.io/projected/8fdcc5ee-189a-4c1e-a652-209015b14ac9-kube-api-access-tfj7g\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.465468 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.465519 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fdcc5ee-189a-4c1e-a652-209015b14ac9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.465577 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fdcc5ee-189a-4c1e-a652-209015b14ac9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.465611 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" 
(UniqueName: \"kubernetes.io/empty-dir/8fdcc5ee-189a-4c1e-a652-209015b14ac9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.465652 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8fdcc5ee-189a-4c1e-a652-209015b14ac9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.465676 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fdcc5ee-189a-4c1e-a652-209015b14ac9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.465706 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fdcc5ee-189a-4c1e-a652-209015b14ac9-config\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.467061 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fdcc5ee-189a-4c1e-a652-209015b14ac9-config\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.467318 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8fdcc5ee-189a-4c1e-a652-209015b14ac9-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.467555 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8fdcc5ee-189a-4c1e-a652-209015b14ac9-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.468570 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.471107 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fdcc5ee-189a-4c1e-a652-209015b14ac9-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.471178 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8fdcc5ee-189a-4c1e-a652-209015b14ac9-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.471878 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8fdcc5ee-189a-4c1e-a652-209015b14ac9-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.490816 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfj7g\" (UniqueName: \"kubernetes.io/projected/8fdcc5ee-189a-4c1e-a652-209015b14ac9-kube-api-access-tfj7g\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.497191 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"8fdcc5ee-189a-4c1e-a652-209015b14ac9\") " pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.531196 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.712359 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 18:48:17 crc kubenswrapper[4767]: I0128 18:48:17.800995 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"837cd3ca-8015-435a-b908-0b46125d68ae","Type":"ContainerStarted","Data":"adfcb6e6564124ec3813ef87d81d3a7ade1292296445c3bbae0ca7e57b4d61b8"} Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.240149 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.241944 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.245039 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.245066 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.245246 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-52zvl" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.245377 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.260864 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.346766 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8f18e95b-3769-49b5-a79a-1afd1fed3147-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.346875 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-brzdm\" (UniqueName: \"kubernetes.io/projected/8f18e95b-3769-49b5-a79a-1afd1fed3147-kube-api-access-brzdm\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.346937 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f18e95b-3769-49b5-a79a-1afd1fed3147-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.347004 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f18e95b-3769-49b5-a79a-1afd1fed3147-config\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.347059 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f18e95b-3769-49b5-a79a-1afd1fed3147-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.347084 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.347102 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f18e95b-3769-49b5-a79a-1afd1fed3147-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " 
pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.347143 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f18e95b-3769-49b5-a79a-1afd1fed3147-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.448648 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f18e95b-3769-49b5-a79a-1afd1fed3147-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.448696 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8f18e95b-3769-49b5-a79a-1afd1fed3147-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.448721 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-brzdm\" (UniqueName: \"kubernetes.io/projected/8f18e95b-3769-49b5-a79a-1afd1fed3147-kube-api-access-brzdm\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.448768 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f18e95b-3769-49b5-a79a-1afd1fed3147-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.448802 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f18e95b-3769-49b5-a79a-1afd1fed3147-config\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.448865 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f18e95b-3769-49b5-a79a-1afd1fed3147-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.448917 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f18e95b-3769-49b5-a79a-1afd1fed3147-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.448942 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.449279 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.450300 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f18e95b-3769-49b5-a79a-1afd1fed3147-config\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.450582 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8f18e95b-3769-49b5-a79a-1afd1fed3147-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.451383 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/8f18e95b-3769-49b5-a79a-1afd1fed3147-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.457223 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f18e95b-3769-49b5-a79a-1afd1fed3147-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.457362 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f18e95b-3769-49b5-a79a-1afd1fed3147-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.463419 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8f18e95b-3769-49b5-a79a-1afd1fed3147-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.469234 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-brzdm\" (UniqueName: \"kubernetes.io/projected/8f18e95b-3769-49b5-a79a-1afd1fed3147-kube-api-access-brzdm\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.474926 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"ovsdbserver-sb-0\" (UID: \"8f18e95b-3769-49b5-a79a-1afd1fed3147\") " pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:20 crc kubenswrapper[4767]: I0128 18:48:20.577917 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:31 crc kubenswrapper[4767]: E0128 18:48:31.530410 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 28 18:48:31 crc kubenswrapper[4767]: E0128 18:48:31.531327 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b4tsz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-666b6646f7-j6m9p_openstack(cb9ed350-6e31-4065-992e-e10644be1c7d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:48:31 crc kubenswrapper[4767]: E0128 18:48:31.532515 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" podUID="cb9ed350-6e31-4065-992e-e10644be1c7d" Jan 28 18:48:31 crc kubenswrapper[4767]: E0128 18:48:31.559841 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 28 18:48:31 crc kubenswrapper[4767]: E0128 18:48:31.560092 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hx7lw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-nbvnb_openstack(bac6b5c2-50a2-4af9-a1d0-2542da258fe4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:48:31 crc kubenswrapper[4767]: E0128 18:48:31.561445 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" podUID="bac6b5c2-50a2-4af9-a1d0-2542da258fe4" Jan 28 18:48:31 crc kubenswrapper[4767]: E0128 18:48:31.948607 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" podUID="bac6b5c2-50a2-4af9-a1d0-2542da258fe4" Jan 28 18:48:31 crc kubenswrapper[4767]: E0128 18:48:31.950487 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" podUID="cb9ed350-6e31-4065-992e-e10644be1c7d" Jan 28 18:48:33 crc kubenswrapper[4767]: E0128 18:48:33.308273 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = 
Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 28 18:48:33 crc kubenswrapper[4767]: E0128 18:48:33.308678 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-vrbbs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-wdmbx_openstack(6dcf2967-51f6-4489-8f2d-3028e5d3b808): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:48:33 crc kubenswrapper[4767]: E0128 18:48:33.310120 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx" podUID="6dcf2967-51f6-4489-8f2d-3028e5d3b808" Jan 28 18:48:33 crc kubenswrapper[4767]: E0128 18:48:33.344436 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified" Jan 28 18:48:33 crc kubenswrapper[4767]: E0128 18:48:33.344882 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries 
--test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tzfgk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-925qz_openstack(80a523d6-89d8-4bcb-a7d0-1ab05465897d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:48:33 crc kubenswrapper[4767]: E0128 18:48:33.347321 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-925qz" podUID="80a523d6-89d8-4bcb-a7d0-1ab05465897d" Jan 28 18:48:33 crc kubenswrapper[4767]: I0128 18:48:33.805562 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 28 18:48:33 crc kubenswrapper[4767]: W0128 18:48:33.809447 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podae717a60_f106_49ec_abaa_3be941f7f907.slice/crio-4dc29f47ca56a07e642e0d5a11a16120303bed1f5910fc5f8a2dd1fd303aa9a8 WatchSource:0}: Error finding container 4dc29f47ca56a07e642e0d5a11a16120303bed1f5910fc5f8a2dd1fd303aa9a8: Status 404 returned error can't find the container with id 4dc29f47ca56a07e642e0d5a11a16120303bed1f5910fc5f8a2dd1fd303aa9a8 Jan 28 18:48:33 crc kubenswrapper[4767]: I0128 18:48:33.894657 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 18:48:33 crc kubenswrapper[4767]: I0128 18:48:33.911342 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dq7h8"] Jan 28 18:48:33 crc kubenswrapper[4767]: I0128 18:48:33.975539 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"ae717a60-f106-49ec-abaa-3be941f7f907","Type":"ContainerStarted","Data":"cd2c11366697f141617ca16313ebc9aeb4ec95a9fc942106a83cd917c5b58a26"} Jan 28 18:48:33 crc kubenswrapper[4767]: I0128 18:48:33.975589 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"ae717a60-f106-49ec-abaa-3be941f7f907","Type":"ContainerStarted","Data":"4dc29f47ca56a07e642e0d5a11a16120303bed1f5910fc5f8a2dd1fd303aa9a8"} Jan 28 18:48:33 crc kubenswrapper[4767]: I0128 18:48:33.978631 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"837cd3ca-8015-435a-b908-0b46125d68ae","Type":"ContainerStarted","Data":"33f47061ddc7e7924a9d434f94bfbc34bd3c9a46bf8dc497b054643ac540c472"} Jan 28 18:48:33 crc kubenswrapper[4767]: I0128 18:48:33.999639 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.144809 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.632365 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx" Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.647585 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-925qz" Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.667998 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzfgk\" (UniqueName: \"kubernetes.io/projected/80a523d6-89d8-4bcb-a7d0-1ab05465897d-kube-api-access-tzfgk\") pod \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\" (UID: \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\") " Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.668068 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80a523d6-89d8-4bcb-a7d0-1ab05465897d-dns-svc\") pod \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\" (UID: \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\") " Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.668099 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6dcf2967-51f6-4489-8f2d-3028e5d3b808-config\") pod \"6dcf2967-51f6-4489-8f2d-3028e5d3b808\" (UID: \"6dcf2967-51f6-4489-8f2d-3028e5d3b808\") " Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.668132 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vrbbs\" (UniqueName: \"kubernetes.io/projected/6dcf2967-51f6-4489-8f2d-3028e5d3b808-kube-api-access-vrbbs\") pod \"6dcf2967-51f6-4489-8f2d-3028e5d3b808\" (UID: \"6dcf2967-51f6-4489-8f2d-3028e5d3b808\") " Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.668183 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a523d6-89d8-4bcb-a7d0-1ab05465897d-config\") pod \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\" (UID: \"80a523d6-89d8-4bcb-a7d0-1ab05465897d\") " Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.668942 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80a523d6-89d8-4bcb-a7d0-1ab05465897d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "80a523d6-89d8-4bcb-a7d0-1ab05465897d" (UID: "80a523d6-89d8-4bcb-a7d0-1ab05465897d"). 
InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.670892 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80a523d6-89d8-4bcb-a7d0-1ab05465897d-config" (OuterVolumeSpecName: "config") pod "80a523d6-89d8-4bcb-a7d0-1ab05465897d" (UID: "80a523d6-89d8-4bcb-a7d0-1ab05465897d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.670905 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6dcf2967-51f6-4489-8f2d-3028e5d3b808-config" (OuterVolumeSpecName: "config") pod "6dcf2967-51f6-4489-8f2d-3028e5d3b808" (UID: "6dcf2967-51f6-4489-8f2d-3028e5d3b808"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.680643 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6dcf2967-51f6-4489-8f2d-3028e5d3b808-kube-api-access-vrbbs" (OuterVolumeSpecName: "kube-api-access-vrbbs") pod "6dcf2967-51f6-4489-8f2d-3028e5d3b808" (UID: "6dcf2967-51f6-4489-8f2d-3028e5d3b808"). InnerVolumeSpecName "kube-api-access-vrbbs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.681580 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80a523d6-89d8-4bcb-a7d0-1ab05465897d-kube-api-access-tzfgk" (OuterVolumeSpecName: "kube-api-access-tzfgk") pod "80a523d6-89d8-4bcb-a7d0-1ab05465897d" (UID: "80a523d6-89d8-4bcb-a7d0-1ab05465897d"). InnerVolumeSpecName "kube-api-access-tzfgk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.770609 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzfgk\" (UniqueName: \"kubernetes.io/projected/80a523d6-89d8-4bcb-a7d0-1ab05465897d-kube-api-access-tzfgk\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.770650 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80a523d6-89d8-4bcb-a7d0-1ab05465897d-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.770663 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6dcf2967-51f6-4489-8f2d-3028e5d3b808-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.770672 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vrbbs\" (UniqueName: \"kubernetes.io/projected/6dcf2967-51f6-4489-8f2d-3028e5d3b808-kube-api-access-vrbbs\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:34 crc kubenswrapper[4767]: I0128 18:48:34.770683 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a523d6-89d8-4bcb-a7d0-1ab05465897d-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.010546 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dq7h8" event={"ID":"79640773-c4bb-4add-83a8-f9a39873bdef","Type":"ContainerStarted","Data":"2709673853ac828e2370ba68474cc76e3bd366cbe3ac653ac02edf0e304975ba"} Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.012851 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-925qz" event={"ID":"80a523d6-89d8-4bcb-a7d0-1ab05465897d","Type":"ContainerDied","Data":"0ff2c26512741a9475ede3605d0f16901115dbec9c101605b7b5bbfbace3ac8a"} Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.012898 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-925qz" Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.017490 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd","Type":"ContainerStarted","Data":"3d481c8dcbb169378cc6a6e69a7028df854ea521a63e90c5e4e9eea8fbf1f230"} Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.019679 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"bbea8b85-5bb2-4570-83e7-07dafaade001","Type":"ContainerStarted","Data":"d61967d9f7f7e59ac33e6585b330ce622d177128d25eda2f7e2b479e34bbd907"} Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.021399 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"07049746-7fe4-45ba-9a83-201db13c3de0","Type":"ContainerStarted","Data":"08d0e1ec8b7bd6bc007f9195792616e5819bdd172195032277f3991104de57d5"} Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.023671 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"2c76158e-431b-4588-962f-0ac42843de29","Type":"ContainerStarted","Data":"242e5fcded1d5c7e2950a54e1c57d78d77f10b38de5101452f728d625d0c117f"} Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.025686 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx" Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.025726 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-wdmbx" event={"ID":"6dcf2967-51f6-4489-8f2d-3028e5d3b808","Type":"ContainerDied","Data":"d3ff9d8a2d7eaabe9a02b01e3ff26c54d573592fb260723296e5ddda28b7ff3b"} Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.034982 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"8f18e95b-3769-49b5-a79a-1afd1fed3147","Type":"ContainerStarted","Data":"91903f72b059e6843073520f3aeaf4efad827f6ba65132298e8d3414077e6487"} Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.137505 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-925qz"] Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.160643 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-925qz"] Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.208571 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-wdmbx"] Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.220919 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-wdmbx"] Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.258929 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 28 18:48:35 crc kubenswrapper[4767]: I0128 18:48:35.808274 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-8cjhn"] Jan 28 18:48:36 crc kubenswrapper[4767]: I0128 18:48:36.044192 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"8fdcc5ee-189a-4c1e-a652-209015b14ac9","Type":"ContainerStarted","Data":"6152494aaa8d1953634c2135f73c56ed80150f647ab18cb531ff77e7513f2ac7"} Jan 28 18:48:36 crc kubenswrapper[4767]: I0128 18:48:36.808252 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6dcf2967-51f6-4489-8f2d-3028e5d3b808" path="/var/lib/kubelet/pods/6dcf2967-51f6-4489-8f2d-3028e5d3b808/volumes" Jan 28 18:48:36 crc kubenswrapper[4767]: I0128 18:48:36.808679 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80a523d6-89d8-4bcb-a7d0-1ab05465897d" path="/var/lib/kubelet/pods/80a523d6-89d8-4bcb-a7d0-1ab05465897d/volumes" Jan 28 18:48:37 crc kubenswrapper[4767]: I0128 18:48:37.063164 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-8cjhn" event={"ID":"ee92efdf-106a-4ca0-8e46-8122b820a0d1","Type":"ContainerStarted","Data":"6fc5c28e15605aacb213ab0e0b46b01fce34371155a3f7eb617532430a9a7112"} Jan 28 18:48:38 crc kubenswrapper[4767]: I0128 18:48:38.073316 4767 generic.go:334] "Generic (PLEG): container finished" podID="837cd3ca-8015-435a-b908-0b46125d68ae" containerID="33f47061ddc7e7924a9d434f94bfbc34bd3c9a46bf8dc497b054643ac540c472" exitCode=0 Jan 28 18:48:38 crc kubenswrapper[4767]: I0128 18:48:38.073381 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"837cd3ca-8015-435a-b908-0b46125d68ae","Type":"ContainerDied","Data":"33f47061ddc7e7924a9d434f94bfbc34bd3c9a46bf8dc497b054643ac540c472"} Jan 28 18:48:39 crc kubenswrapper[4767]: I0128 18:48:39.081933 4767 generic.go:334] "Generic (PLEG): container finished" podID="ae717a60-f106-49ec-abaa-3be941f7f907" 
containerID="cd2c11366697f141617ca16313ebc9aeb4ec95a9fc942106a83cd917c5b58a26" exitCode=0 Jan 28 18:48:39 crc kubenswrapper[4767]: I0128 18:48:39.082020 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"ae717a60-f106-49ec-abaa-3be941f7f907","Type":"ContainerDied","Data":"cd2c11366697f141617ca16313ebc9aeb4ec95a9fc942106a83cd917c5b58a26"} Jan 28 18:48:40 crc kubenswrapper[4767]: I0128 18:48:40.117182 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"07049746-7fe4-45ba-9a83-201db13c3de0","Type":"ContainerStarted","Data":"5b49b33ec8c2642b89db42e4df138ae81865289552e3abb54cd8fe2f4a3e08af"} Jan 28 18:48:40 crc kubenswrapper[4767]: I0128 18:48:40.117749 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 28 18:48:40 crc kubenswrapper[4767]: I0128 18:48:40.128597 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"ae717a60-f106-49ec-abaa-3be941f7f907","Type":"ContainerStarted","Data":"cb3f7931af0d15588f6af805d7943c7311c9e4ede6f3b7a3303f8ad3a87d8ab4"} Jan 28 18:48:40 crc kubenswrapper[4767]: I0128 18:48:40.134858 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"2c76158e-431b-4588-962f-0ac42843de29","Type":"ContainerStarted","Data":"c9391ee18926faa8836982f4136f94d531902a4acc1983c993ea67269e0cabbb"} Jan 28 18:48:40 crc kubenswrapper[4767]: I0128 18:48:40.135300 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 28 18:48:40 crc kubenswrapper[4767]: I0128 18:48:40.145046 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"837cd3ca-8015-435a-b908-0b46125d68ae","Type":"ContainerStarted","Data":"6cd3260da0175f9b0a07e178b6f46ada44a4b1b16f3dc9e7df617babd73ed364"} Jan 28 18:48:40 crc kubenswrapper[4767]: I0128 18:48:40.146554 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=25.236625394 podStartE2EDuration="30.146513726s" podCreationTimestamp="2026-01-28 18:48:10 +0000 UTC" firstStartedPulling="2026-01-28 18:48:34.097552802 +0000 UTC m=+1120.061735666" lastFinishedPulling="2026-01-28 18:48:39.007441124 +0000 UTC m=+1124.971623998" observedRunningTime="2026-01-28 18:48:40.137822075 +0000 UTC m=+1126.102004949" watchObservedRunningTime="2026-01-28 18:48:40.146513726 +0000 UTC m=+1126.110696600" Jan 28 18:48:40 crc kubenswrapper[4767]: I0128 18:48:40.158251 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"8f18e95b-3769-49b5-a79a-1afd1fed3147","Type":"ContainerStarted","Data":"7f1d02f0687ae1f224eb6acc096883ac528ed997bfd6a8b6782d81eb5d0e58fb"} Jan 28 18:48:40 crc kubenswrapper[4767]: I0128 18:48:40.172885 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=31.17285801 podStartE2EDuration="31.17285801s" podCreationTimestamp="2026-01-28 18:48:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:48:40.169458243 +0000 UTC m=+1126.133641117" watchObservedRunningTime="2026-01-28 18:48:40.17285801 +0000 UTC m=+1126.137040884" Jan 28 18:48:40 crc kubenswrapper[4767]: I0128 18:48:40.206714 4767 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=22.423778901 podStartE2EDuration="28.206683906s" podCreationTimestamp="2026-01-28 18:48:12 +0000 UTC" firstStartedPulling="2026-01-28 18:48:34.069458825 +0000 UTC m=+1120.033641689" lastFinishedPulling="2026-01-28 18:48:39.85236381 +0000 UTC m=+1125.816546694" observedRunningTime="2026-01-28 18:48:40.194196166 +0000 UTC m=+1126.158379060" watchObservedRunningTime="2026-01-28 18:48:40.206683906 +0000 UTC m=+1126.170866780" Jan 28 18:48:40 crc kubenswrapper[4767]: I0128 18:48:40.225115 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=16.580155742 podStartE2EDuration="32.2250825s" podCreationTimestamp="2026-01-28 18:48:08 +0000 UTC" firstStartedPulling="2026-01-28 18:48:17.712078437 +0000 UTC m=+1103.676261311" lastFinishedPulling="2026-01-28 18:48:33.357005195 +0000 UTC m=+1119.321188069" observedRunningTime="2026-01-28 18:48:40.219907029 +0000 UTC m=+1126.184089903" watchObservedRunningTime="2026-01-28 18:48:40.2250825 +0000 UTC m=+1126.189265374" Jan 28 18:48:41 crc kubenswrapper[4767]: I0128 18:48:41.026376 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:41 crc kubenswrapper[4767]: I0128 18:48:41.026497 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Jan 28 18:48:41 crc kubenswrapper[4767]: I0128 18:48:41.171094 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"8fdcc5ee-189a-4c1e-a652-209015b14ac9","Type":"ContainerStarted","Data":"fb33e1332be671f0f8f58fde1264ec0a0a38be97ad4eb3f47f2c3420a9fde31f"} Jan 28 18:48:41 crc kubenswrapper[4767]: I0128 18:48:41.174140 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dq7h8" event={"ID":"79640773-c4bb-4add-83a8-f9a39873bdef","Type":"ContainerStarted","Data":"d2ed68cb0fa6548eaf18c279143ee21011f5661e079262c94d33c5645e6ab38c"} Jan 28 18:48:41 crc kubenswrapper[4767]: I0128 18:48:41.174336 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-dq7h8" Jan 28 18:48:41 crc kubenswrapper[4767]: I0128 18:48:41.178902 4767 generic.go:334] "Generic (PLEG): container finished" podID="ee92efdf-106a-4ca0-8e46-8122b820a0d1" containerID="d32efe4756f9c4e6ac9b664584fe7d08ff11a6d525b5ea7f8a941defe3cfa614" exitCode=0 Jan 28 18:48:41 crc kubenswrapper[4767]: I0128 18:48:41.179037 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-8cjhn" event={"ID":"ee92efdf-106a-4ca0-8e46-8122b820a0d1","Type":"ContainerDied","Data":"d32efe4756f9c4e6ac9b664584fe7d08ff11a6d525b5ea7f8a941defe3cfa614"} Jan 28 18:48:41 crc kubenswrapper[4767]: I0128 18:48:41.200377 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-dq7h8" podStartSLOduration=20.0895476 podStartE2EDuration="25.200350127s" podCreationTimestamp="2026-01-28 18:48:16 +0000 UTC" firstStartedPulling="2026-01-28 18:48:34.07892894 +0000 UTC m=+1120.043111814" lastFinishedPulling="2026-01-28 18:48:39.189731467 +0000 UTC m=+1125.153914341" observedRunningTime="2026-01-28 18:48:41.197381995 +0000 UTC m=+1127.161564889" watchObservedRunningTime="2026-01-28 18:48:41.200350127 +0000 UTC m=+1127.164533011" Jan 28 18:48:42 crc kubenswrapper[4767]: I0128 18:48:42.192324 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/ovn-controller-ovs-8cjhn" event={"ID":"ee92efdf-106a-4ca0-8e46-8122b820a0d1","Type":"ContainerStarted","Data":"59445cfe9203d600838c6c08b95bbc2dad68e29d270e5bc00e1d3ec5842ed590"} Jan 28 18:48:43 crc kubenswrapper[4767]: I0128 18:48:43.202687 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"8fdcc5ee-189a-4c1e-a652-209015b14ac9","Type":"ContainerStarted","Data":"b61fa5c8b01288b27c5d61fa982c375a7470b270f4b26b192abcc81b8acb04d8"} Jan 28 18:48:43 crc kubenswrapper[4767]: I0128 18:48:43.209116 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"8f18e95b-3769-49b5-a79a-1afd1fed3147","Type":"ContainerStarted","Data":"d9aee836d9b7ed6af424bcf5865a58ffc7a2d67fee8723ef27bc684da958c367"} Jan 28 18:48:43 crc kubenswrapper[4767]: I0128 18:48:43.211611 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-8cjhn" event={"ID":"ee92efdf-106a-4ca0-8e46-8122b820a0d1","Type":"ContainerStarted","Data":"da53e575472915190bf23abebadfd869930c4bd396cb9ad43bc8afa58d8d7698"} Jan 28 18:48:43 crc kubenswrapper[4767]: I0128 18:48:43.211848 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:43 crc kubenswrapper[4767]: I0128 18:48:43.234111 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=19.59577438 podStartE2EDuration="27.234090659s" podCreationTimestamp="2026-01-28 18:48:16 +0000 UTC" firstStartedPulling="2026-01-28 18:48:35.232135144 +0000 UTC m=+1121.196318018" lastFinishedPulling="2026-01-28 18:48:42.870451423 +0000 UTC m=+1128.834634297" observedRunningTime="2026-01-28 18:48:43.230788756 +0000 UTC m=+1129.194971640" watchObservedRunningTime="2026-01-28 18:48:43.234090659 +0000 UTC m=+1129.198273533" Jan 28 18:48:43 crc kubenswrapper[4767]: I0128 18:48:43.258348 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-8cjhn" podStartSLOduration=24.224923615 podStartE2EDuration="27.258317715s" podCreationTimestamp="2026-01-28 18:48:16 +0000 UTC" firstStartedPulling="2026-01-28 18:48:36.692892883 +0000 UTC m=+1122.657075757" lastFinishedPulling="2026-01-28 18:48:39.726286983 +0000 UTC m=+1125.690469857" observedRunningTime="2026-01-28 18:48:43.253766273 +0000 UTC m=+1129.217949157" watchObservedRunningTime="2026-01-28 18:48:43.258317715 +0000 UTC m=+1129.222500589" Jan 28 18:48:43 crc kubenswrapper[4767]: I0128 18:48:43.277375 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=15.561996496999999 podStartE2EDuration="24.277352601s" podCreationTimestamp="2026-01-28 18:48:19 +0000 UTC" firstStartedPulling="2026-01-28 18:48:34.145879021 +0000 UTC m=+1120.110061895" lastFinishedPulling="2026-01-28 18:48:42.861235125 +0000 UTC m=+1128.825417999" observedRunningTime="2026-01-28 18:48:43.272840919 +0000 UTC m=+1129.237023793" watchObservedRunningTime="2026-01-28 18:48:43.277352601 +0000 UTC m=+1129.241535475" Jan 28 18:48:44 crc kubenswrapper[4767]: I0128 18:48:44.218589 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:48:44 crc kubenswrapper[4767]: I0128 18:48:44.532333 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:44 crc kubenswrapper[4767]: I0128 18:48:44.573679 
4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:44 crc kubenswrapper[4767]: I0128 18:48:44.578281 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:44 crc kubenswrapper[4767]: I0128 18:48:44.621986 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.230178 4767 generic.go:334] "Generic (PLEG): container finished" podID="bac6b5c2-50a2-4af9-a1d0-2542da258fe4" containerID="336b05e6cba3bfbbe3bfece504173ec451f73c8cfb2970b7d178bb779daa9c44" exitCode=0 Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.230290 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" event={"ID":"bac6b5c2-50a2-4af9-a1d0-2542da258fe4","Type":"ContainerDied","Data":"336b05e6cba3bfbbe3bfece504173ec451f73c8cfb2970b7d178bb779daa9c44"} Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.232718 4767 generic.go:334] "Generic (PLEG): container finished" podID="cb9ed350-6e31-4065-992e-e10644be1c7d" containerID="165de16414b6dcf23b9803f584c00339dec2c55042ccf055c87665354ccddfe6" exitCode=0 Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.232876 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" event={"ID":"cb9ed350-6e31-4065-992e-e10644be1c7d","Type":"ContainerDied","Data":"165de16414b6dcf23b9803f584c00339dec2c55042ccf055c87665354ccddfe6"} Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.233542 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.233573 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.283852 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.301858 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.615079 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nbvnb"] Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.659481 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-rclv2"] Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.661500 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2" Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.663920 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.683308 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-rclv2"] Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.767152 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-h572c"] Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.768732 4767 util.go:30] "No sandbox for pod can be found. 
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.768732 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.770796 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.794010 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-h572c"]
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.814453 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-rclv2\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.814510 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-rclv2\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.814642 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-config\") pod \"dnsmasq-dns-5bf47b49b7-rclv2\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.814690 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qd988\" (UniqueName: \"kubernetes.io/projected/2a188e31-b9bb-41e4-93f0-ed4a9e538570-kube-api-access-qd988\") pod \"dnsmasq-dns-5bf47b49b7-rclv2\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.840349 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-j6m9p"]
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.867903 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8554648995-pbstb"]
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.871283 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.873074 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.893686 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pbstb"]
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.916948 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-config\") pod \"dnsmasq-dns-5bf47b49b7-rclv2\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.917025 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-ovs-rundir\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.917061 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-config\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.917093 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-ovn-rundir\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.917125 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qd988\" (UniqueName: \"kubernetes.io/projected/2a188e31-b9bb-41e4-93f0-ed4a9e538570-kube-api-access-qd988\") pod \"dnsmasq-dns-5bf47b49b7-rclv2\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.917154 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.917262 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-combined-ca-bundle\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.917305 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-njdpl\" (UniqueName: \"kubernetes.io/projected/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-kube-api-access-njdpl\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.917343 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-rclv2\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.917367 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-rclv2\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.918675 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-ovsdbserver-nb\") pod \"dnsmasq-dns-5bf47b49b7-rclv2\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.918938 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-dns-svc\") pod \"dnsmasq-dns-5bf47b49b7-rclv2\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.921504 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-config\") pod \"dnsmasq-dns-5bf47b49b7-rclv2\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.957970 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.962998 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.970236 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-dws6c"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.971412 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.972495 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.984291 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qd988\" (UniqueName: \"kubernetes.io/projected/2a188e31-b9bb-41e4-93f0-ed4a9e538570-kube-api-access-qd988\") pod \"dnsmasq-dns-5bf47b49b7-rclv2\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:45 crc kubenswrapper[4767]: I0128 18:48:45.985227 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.002176 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.004443 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.021895 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-config\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022020 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-combined-ca-bundle\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022072 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-njdpl\" (UniqueName: \"kubernetes.io/projected/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-kube-api-access-njdpl\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022100 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdc30b2c-2b3b-4e16-8a77-965490805677-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022145 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdc30b2c-2b3b-4e16-8a77-965490805677-scripts\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022169 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8ml9\" (UniqueName: \"kubernetes.io/projected/38cbf822-4dba-4db2-843a-05cc8133fe50-kube-api-access-g8ml9\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022196 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdc30b2c-2b3b-4e16-8a77-965490805677-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022287 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bdc30b2c-2b3b-4e16-8a77-965490805677-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022325 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022386 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdc30b2c-2b3b-4e16-8a77-965490805677-config\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022426 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-ovs-rundir\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022449 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022471 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdc30b2c-2b3b-4e16-8a77-965490805677-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022498 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-config\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022517 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-ovn-rundir\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022532 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kk2nc\" (UniqueName: \"kubernetes.io/projected/bdc30b2c-2b3b-4e16-8a77-965490805677-kube-api-access-kk2nc\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022548 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-dns-svc\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.022619 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.026726 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-ovs-rundir\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.027334 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-ovn-rundir\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.027849 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-combined-ca-bundle\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.028278 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-config\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.042403 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.051447 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-njdpl\" (UniqueName: \"kubernetes.io/projected/eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73-kube-api-access-njdpl\") pod \"ovn-controller-metrics-h572c\" (UID: \"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73\") " pod="openstack/ovn-controller-metrics-h572c"
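Each volume above passes through the same three logged milestones: VerifyControllerAttachedVolume started (reconciler_common.go:245), MountVolume started (reconciler_common.go:218), and MountVolume.SetUp succeeded (operation_generator.go:637). A hedged, self-contained Go sketch of that progression, with illustrative types only, not the kubelet's actual implementation:

package main

import "fmt"

// volumeState names the three milestones seen in the log; the type and
// constant names here are assumptions for illustration.
type volumeState int

const (
	attachedVerified volumeState = iota // "VerifyControllerAttachedVolume started"
	mountStarted                        // "MountVolume started"
	setUpSucceeded                      // "MountVolume.SetUp succeeded"
)

type volume struct {
	name, pod string
	state     volumeState
}

// reconcile advances every volume one milestone per pass, mirroring how
// each volume above is logged at verify, then mount, then set-up success.
func reconcile(vols []volume) {
	for i := range vols {
		switch vols[i].state {
		case attachedVerified:
			vols[i].state = mountStarted
			fmt.Printf("MountVolume started for volume %q pod=%q\n", vols[i].name, vols[i].pod)
		case mountStarted:
			vols[i].state = setUpSucceeded
			fmt.Printf("MountVolume.SetUp succeeded for volume %q pod=%q\n", vols[i].name, vols[i].pod)
		}
	}
}

func main() {
	vols := []volume{
		{name: "config", pod: "openstack/ovn-controller-metrics-h572c"},
		{name: "metrics-certs-tls-certs", pod: "openstack/ovn-controller-metrics-h572c"},
	}
	reconcile(vols) // pass 1: verify -> mount started
	reconcile(vols) // pass 2: mount started -> set up succeeded
}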
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.089450 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-h572c"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.125162 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-config\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.125283 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdc30b2c-2b3b-4e16-8a77-965490805677-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.125314 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdc30b2c-2b3b-4e16-8a77-965490805677-scripts\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.125333 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8ml9\" (UniqueName: \"kubernetes.io/projected/38cbf822-4dba-4db2-843a-05cc8133fe50-kube-api-access-g8ml9\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.125350 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdc30b2c-2b3b-4e16-8a77-965490805677-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.125389 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bdc30b2c-2b3b-4e16-8a77-965490805677-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.125412 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.125452 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdc30b2c-2b3b-4e16-8a77-965490805677-config\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.125489 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.125506 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdc30b2c-2b3b-4e16-8a77-965490805677-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.125527 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kk2nc\" (UniqueName: \"kubernetes.io/projected/bdc30b2c-2b3b-4e16-8a77-965490805677-kube-api-access-kk2nc\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.125545 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-dns-svc\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.127431 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/bdc30b2c-2b3b-4e16-8a77-965490805677-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.128047 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-dns-svc\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.128279 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-config\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.129273 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-ovsdbserver-nb\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.133814 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/bdc30b2c-2b3b-4e16-8a77-965490805677-scripts\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.135171 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-ovsdbserver-sb\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.135691 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bdc30b2c-2b3b-4e16-8a77-965490805677-config\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.139125 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdc30b2c-2b3b-4e16-8a77-965490805677-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.144519 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bdc30b2c-2b3b-4e16-8a77-965490805677-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.150359 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/bdc30b2c-2b3b-4e16-8a77-965490805677-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.156870 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kk2nc\" (UniqueName: \"kubernetes.io/projected/bdc30b2c-2b3b-4e16-8a77-965490805677-kube-api-access-kk2nc\") pod \"ovn-northd-0\" (UID: \"bdc30b2c-2b3b-4e16-8a77-965490805677\") " pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.157116 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8ml9\" (UniqueName: \"kubernetes.io/projected/38cbf822-4dba-4db2-843a-05cc8133fe50-kube-api-access-g8ml9\") pod \"dnsmasq-dns-8554648995-pbstb\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.170043 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.195960 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-pbstb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.206361 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.286550 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" podUID="bac6b5c2-50a2-4af9-a1d0-2542da258fe4" containerName="dnsmasq-dns" containerID="cri-o://926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d" gracePeriod=10
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.286796 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" event={"ID":"bac6b5c2-50a2-4af9-a1d0-2542da258fe4","Type":"ContainerStarted","Data":"926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d"}
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.289971 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.301805 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" podUID="cb9ed350-6e31-4065-992e-e10644be1c7d" containerName="dnsmasq-dns" containerID="cri-o://26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd" gracePeriod=10
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.302104 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" event={"ID":"cb9ed350-6e31-4065-992e-e10644be1c7d","Type":"ContainerStarted","Data":"26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd"}
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.302718 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.329622 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" podStartSLOduration=3.596206487 podStartE2EDuration="40.329599789s" podCreationTimestamp="2026-01-28 18:48:06 +0000 UTC" firstStartedPulling="2026-01-28 18:48:07.886603248 +0000 UTC m=+1093.850786122" lastFinishedPulling="2026-01-28 18:48:44.61999655 +0000 UTC m=+1130.584179424" observedRunningTime="2026-01-28 18:48:46.324543091 +0000 UTC m=+1132.288725965" watchObservedRunningTime="2026-01-28 18:48:46.329599789 +0000 UTC m=+1132.293782663"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.359992 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" podStartSLOduration=3.104325667 podStartE2EDuration="40.359974528s" podCreationTimestamp="2026-01-28 18:48:06 +0000 UTC" firstStartedPulling="2026-01-28 18:48:07.364010168 +0000 UTC m=+1093.328193042" lastFinishedPulling="2026-01-28 18:48:44.619659029 +0000 UTC m=+1130.583841903" observedRunningTime="2026-01-28 18:48:46.353108774 +0000 UTC m=+1132.317291668" watchObservedRunningTime="2026-01-28 18:48:46.359974528 +0000 UTC m=+1132.324157402"
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.531930 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-rclv2"]
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.780021 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-h572c"]
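The event={…} payloads in the PLEG lines above have a fixed JSON shape: a pod UID, an event type such as ContainerStarted or ContainerDied, and a container or sandbox ID. A small Go sketch that unmarshals one of the payloads quoted above (the struct here mirrors the visible JSON keys; it is an assumption for illustration, not kubelet's internal type):

package main

import (
	"encoding/json"
	"fmt"
)

// plegEvent mirrors the JSON keys visible in the log's event={...} payloads.
type plegEvent struct {
	ID   string `json:"ID"`   // pod UID
	Type string `json:"Type"` // e.g. ContainerStarted, ContainerDied
	Data string `json:"Data"` // container or sandbox ID
}

func main() {
	// Payload copied verbatim from the dnsmasq-dns-666b6646f7-j6m9p line above.
	raw := `{"ID":"cb9ed350-6e31-4065-992e-e10644be1c7d","Type":"ContainerStarted","Data":"26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd"}`

	var ev plegEvent
	if err := json.Unmarshal([]byte(raw), &ev); err != nil {
		panic(err)
	}
	fmt.Printf("pod %s: %s %s\n", ev.ID, ev.Type, ev.Data)
}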
Jan 28 18:48:46 crc kubenswrapper[4767]: W0128 18:48:46.800947 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeb5655f4_ca65_4c3f_9ea1_2d81b3c54b73.slice/crio-b328de5640ad91dfa9cdf82e0f0b088b1d49407839d5aca2e10be3224980a78f WatchSource:0}: Error finding container b328de5640ad91dfa9cdf82e0f0b088b1d49407839d5aca2e10be3224980a78f: Status 404 returned error can't find the container with id b328de5640ad91dfa9cdf82e0f0b088b1d49407839d5aca2e10be3224980a78f
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.913520 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.930713 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pbstb"]
Jan 28 18:48:46 crc kubenswrapper[4767]: W0128 18:48:46.966728 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38cbf822_4dba_4db2_843a_05cc8133fe50.slice/crio-b7db94327a2e3a6dbdaed33db1abd2ca285e9d33450b194e8cc383814b1d1bae WatchSource:0}: Error finding container b7db94327a2e3a6dbdaed33db1abd2ca285e9d33450b194e8cc383814b1d1bae: Status 404 returned error can't find the container with id b7db94327a2e3a6dbdaed33db1abd2ca285e9d33450b194e8cc383814b1d1bae
Jan 28 18:48:46 crc kubenswrapper[4767]: I0128 18:48:46.974488 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.028270 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.057503 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-config\") pod \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\" (UID: \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\") "
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.057789 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-dns-svc\") pod \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\" (UID: \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\") "
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.057956 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hx7lw\" (UniqueName: \"kubernetes.io/projected/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-kube-api-access-hx7lw\") pod \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\" (UID: \"bac6b5c2-50a2-4af9-a1d0-2542da258fe4\") "
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.070025 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-kube-api-access-hx7lw" (OuterVolumeSpecName: "kube-api-access-hx7lw") pod "bac6b5c2-50a2-4af9-a1d0-2542da258fe4" (UID: "bac6b5c2-50a2-4af9-a1d0-2542da258fe4"). InnerVolumeSpecName "kube-api-access-hx7lw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.102262 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-config" (OuterVolumeSpecName: "config") pod "bac6b5c2-50a2-4af9-a1d0-2542da258fe4" (UID: "bac6b5c2-50a2-4af9-a1d0-2542da258fe4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.114385 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bac6b5c2-50a2-4af9-a1d0-2542da258fe4" (UID: "bac6b5c2-50a2-4af9-a1d0-2542da258fe4"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.134983 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.160595 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb9ed350-6e31-4065-992e-e10644be1c7d-dns-svc\") pod \"cb9ed350-6e31-4065-992e-e10644be1c7d\" (UID: \"cb9ed350-6e31-4065-992e-e10644be1c7d\") "
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.162462 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4tsz\" (UniqueName: \"kubernetes.io/projected/cb9ed350-6e31-4065-992e-e10644be1c7d-kube-api-access-b4tsz\") pod \"cb9ed350-6e31-4065-992e-e10644be1c7d\" (UID: \"cb9ed350-6e31-4065-992e-e10644be1c7d\") "
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.162510 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb9ed350-6e31-4065-992e-e10644be1c7d-config\") pod \"cb9ed350-6e31-4065-992e-e10644be1c7d\" (UID: \"cb9ed350-6e31-4065-992e-e10644be1c7d\") "
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.163172 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hx7lw\" (UniqueName: \"kubernetes.io/projected/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-kube-api-access-hx7lw\") on node \"crc\" DevicePath \"\""
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.163197 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-config\") on node \"crc\" DevicePath \"\""
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.163244 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bac6b5c2-50a2-4af9-a1d0-2542da258fe4-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.186678 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb9ed350-6e31-4065-992e-e10644be1c7d-kube-api-access-b4tsz" (OuterVolumeSpecName: "kube-api-access-b4tsz") pod "cb9ed350-6e31-4065-992e-e10644be1c7d" (UID: "cb9ed350-6e31-4065-992e-e10644be1c7d"). InnerVolumeSpecName "kube-api-access-b4tsz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.221026 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb9ed350-6e31-4065-992e-e10644be1c7d-config" (OuterVolumeSpecName: "config") pod "cb9ed350-6e31-4065-992e-e10644be1c7d" (UID: "cb9ed350-6e31-4065-992e-e10644be1c7d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.233974 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cb9ed350-6e31-4065-992e-e10644be1c7d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cb9ed350-6e31-4065-992e-e10644be1c7d" (UID: "cb9ed350-6e31-4065-992e-e10644be1c7d"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.266047 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4tsz\" (UniqueName: \"kubernetes.io/projected/cb9ed350-6e31-4065-992e-e10644be1c7d-kube-api-access-b4tsz\") on node \"crc\" DevicePath \"\""
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.266109 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cb9ed350-6e31-4065-992e-e10644be1c7d-config\") on node \"crc\" DevicePath \"\""
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.266123 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cb9ed350-6e31-4065-992e-e10644be1c7d-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.317796 4767 generic.go:334] "Generic (PLEG): container finished" podID="bac6b5c2-50a2-4af9-a1d0-2542da258fe4" containerID="926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d" exitCode=0
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.317884 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" event={"ID":"bac6b5c2-50a2-4af9-a1d0-2542da258fe4","Type":"ContainerDied","Data":"926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d"}
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.317926 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb" event={"ID":"bac6b5c2-50a2-4af9-a1d0-2542da258fe4","Type":"ContainerDied","Data":"0f72093db8853c5c95cc218a9b0ec6352332c92396960b4a3a432b88f1aa6966"}
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.317946 4767 scope.go:117] "RemoveContainer" containerID="926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.318123 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-nbvnb"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.334725 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pbstb" event={"ID":"38cbf822-4dba-4db2-843a-05cc8133fe50","Type":"ContainerStarted","Data":"b7db94327a2e3a6dbdaed33db1abd2ca285e9d33450b194e8cc383814b1d1bae"}
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.355518 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"bdc30b2c-2b3b-4e16-8a77-965490805677","Type":"ContainerStarted","Data":"78b7ec86df18f0b7500a4c9fbaad0ee78c8f46d2095526ee0b547c5247c8020b"}
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.376940 4767 scope.go:117] "RemoveContainer" containerID="336b05e6cba3bfbbe3bfece504173ec451f73c8cfb2970b7d178bb779daa9c44"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.382853 4767 generic.go:334] "Generic (PLEG): container finished" podID="cb9ed350-6e31-4065-992e-e10644be1c7d" containerID="26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd" exitCode=0
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.382941 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" event={"ID":"cb9ed350-6e31-4065-992e-e10644be1c7d","Type":"ContainerDied","Data":"26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd"}
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.382975 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p" event={"ID":"cb9ed350-6e31-4065-992e-e10644be1c7d","Type":"ContainerDied","Data":"545238ffd630a1f772281f31b612d03ec38fed325fffbb1ad8531333db627529"}
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.383044 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-666b6646f7-j6m9p"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.392386 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nbvnb"]
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.393245 4767 generic.go:334] "Generic (PLEG): container finished" podID="2a188e31-b9bb-41e4-93f0-ed4a9e538570" containerID="d4f199c7368c8fd178500ac71fe6460c23b8542bc6459ef1bec525bb6c4c5eb8" exitCode=0
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.393313 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2" event={"ID":"2a188e31-b9bb-41e4-93f0-ed4a9e538570","Type":"ContainerDied","Data":"d4f199c7368c8fd178500ac71fe6460c23b8542bc6459ef1bec525bb6c4c5eb8"}
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.393345 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2" event={"ID":"2a188e31-b9bb-41e4-93f0-ed4a9e538570","Type":"ContainerStarted","Data":"b9e79ab172b6d42d3105139058f837c9a690bb3fd5f40a25eb8e0309b217ba5f"}
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.399427 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-h572c" event={"ID":"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73","Type":"ContainerStarted","Data":"202991abafbf40bdb0a165679557c065ac37942de5271f4a7ca4dc1749b18218"}
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.399617 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-h572c" event={"ID":"eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73","Type":"ContainerStarted","Data":"b328de5640ad91dfa9cdf82e0f0b088b1d49407839d5aca2e10be3224980a78f"}
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.405012 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.409509 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-nbvnb"]
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.415633 4767 scope.go:117] "RemoveContainer" containerID="926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d"
Jan 28 18:48:47 crc kubenswrapper[4767]: E0128 18:48:47.416552 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d\": container with ID starting with 926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d not found: ID does not exist" containerID="926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.416605 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d"} err="failed to get container status \"926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d\": rpc error: code = NotFound desc = could not find container \"926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d\": container with ID starting with 926e831ff6212f66e145b986f44bc309cacbde534049cd459a418a692277f20d not found: ID does not exist"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.416644 4767 scope.go:117] "RemoveContainer" containerID="336b05e6cba3bfbbe3bfece504173ec451f73c8cfb2970b7d178bb779daa9c44"
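The "ContainerStatus from runtime service failed" errors in this stretch carry gRPC code NotFound: the container the kubelet is trying to clean up is already gone, which is exactly the desired end state of RemoveContainer, so the failure is logged and the deletion treated as done. A hedged sketch of that idempotent check using the standard gRPC status packages (the helper name is an assumption, not a kubelet function):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isAlreadyGone reports whether a runtime-service error only means the
// container no longer exists -- the benign case seen in the log here.
func isAlreadyGone(err error) bool {
	return status.Code(err) == codes.NotFound
}

func main() {
	// Simulate the runtime's reply for a container that was already removed.
	err := status.Error(codes.NotFound,
		`could not find container "926e831f...": ID does not exist`)

	if isAlreadyGone(err) {
		fmt.Println("container already removed; treating RemoveContainer as done")
	} else if err != nil {
		fmt.Println("real failure:", err)
	}
}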
Jan 28 18:48:47 crc kubenswrapper[4767]: E0128 18:48:47.417743 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"336b05e6cba3bfbbe3bfece504173ec451f73c8cfb2970b7d178bb779daa9c44\": container with ID starting with 336b05e6cba3bfbbe3bfece504173ec451f73c8cfb2970b7d178bb779daa9c44 not found: ID does not exist" containerID="336b05e6cba3bfbbe3bfece504173ec451f73c8cfb2970b7d178bb779daa9c44"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.417775 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"336b05e6cba3bfbbe3bfece504173ec451f73c8cfb2970b7d178bb779daa9c44"} err="failed to get container status \"336b05e6cba3bfbbe3bfece504173ec451f73c8cfb2970b7d178bb779daa9c44\": rpc error: code = NotFound desc = could not find container \"336b05e6cba3bfbbe3bfece504173ec451f73c8cfb2970b7d178bb779daa9c44\": container with ID starting with 336b05e6cba3bfbbe3bfece504173ec451f73c8cfb2970b7d178bb779daa9c44 not found: ID does not exist"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.417796 4767 scope.go:117] "RemoveContainer" containerID="26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.449136 4767 scope.go:117] "RemoveContainer" containerID="165de16414b6dcf23b9803f584c00339dec2c55042ccf055c87665354ccddfe6"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.473125 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-h572c" podStartSLOduration=2.47309975 podStartE2EDuration="2.47309975s" podCreationTimestamp="2026-01-28 18:48:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:48:47.449747541 +0000 UTC m=+1133.413930425" watchObservedRunningTime="2026-01-28 18:48:47.47309975 +0000 UTC m=+1133.437282624"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.513882 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-j6m9p"]
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.531642 4767 scope.go:117] "RemoveContainer" containerID="26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.532322 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-666b6646f7-j6m9p"]
Jan 28 18:48:47 crc kubenswrapper[4767]: E0128 18:48:47.533330 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd\": container with ID starting with 26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd not found: ID does not exist" containerID="26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd"
Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.533368 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd"} err="failed to get container status \"26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd\": rpc error: code = NotFound desc = could not find container \"26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd\": container with ID starting with 26d49ba676bcbf3e327783dd37263d8bbe1310d2ae089495714e43bebee2c5fd not found: ID does not exist"
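The ovn-controller-metrics-h572c startup-duration entry above reports firstStartedPulling and lastFinishedPulling as "0001-01-01 00:00:00 +0000 UTC": no image pull was recorded, so the SLO and E2E durations coincide, and the printed sentinel is simply Go's zero time.Time value. A two-line demonstration:

package main

import (
	"fmt"
	"time"
)

func main() {
	var never time.Time                // zero value: no pull was recorded
	fmt.Println(never.IsZero(), never) // true 0001-01-01 00:00:00 +0000 UTC
}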
18:48:47.533397 4767 scope.go:117] "RemoveContainer" containerID="165de16414b6dcf23b9803f584c00339dec2c55042ccf055c87665354ccddfe6" Jan 28 18:48:47 crc kubenswrapper[4767]: E0128 18:48:47.533674 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"165de16414b6dcf23b9803f584c00339dec2c55042ccf055c87665354ccddfe6\": container with ID starting with 165de16414b6dcf23b9803f584c00339dec2c55042ccf055c87665354ccddfe6 not found: ID does not exist" containerID="165de16414b6dcf23b9803f584c00339dec2c55042ccf055c87665354ccddfe6" Jan 28 18:48:47 crc kubenswrapper[4767]: I0128 18:48:47.533705 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"165de16414b6dcf23b9803f584c00339dec2c55042ccf055c87665354ccddfe6"} err="failed to get container status \"165de16414b6dcf23b9803f584c00339dec2c55042ccf055c87665354ccddfe6\": rpc error: code = NotFound desc = could not find container \"165de16414b6dcf23b9803f584c00339dec2c55042ccf055c87665354ccddfe6\": container with ID starting with 165de16414b6dcf23b9803f584c00339dec2c55042ccf055c87665354ccddfe6 not found: ID does not exist" Jan 28 18:48:48 crc kubenswrapper[4767]: I0128 18:48:48.412678 4767 generic.go:334] "Generic (PLEG): container finished" podID="38cbf822-4dba-4db2-843a-05cc8133fe50" containerID="a2139472d0e8fa7714931b5fac5bf82f1ac557d3d0db03aaf692847b4ebf08ef" exitCode=0 Jan 28 18:48:48 crc kubenswrapper[4767]: I0128 18:48:48.412920 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pbstb" event={"ID":"38cbf822-4dba-4db2-843a-05cc8133fe50","Type":"ContainerDied","Data":"a2139472d0e8fa7714931b5fac5bf82f1ac557d3d0db03aaf692847b4ebf08ef"} Jan 28 18:48:48 crc kubenswrapper[4767]: I0128 18:48:48.417349 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2" event={"ID":"2a188e31-b9bb-41e4-93f0-ed4a9e538570","Type":"ContainerStarted","Data":"ba7334b5ae8160a28d37d95f1d559c7e8eb9efe99a9d8f2f6b59627de4732467"} Jan 28 18:48:48 crc kubenswrapper[4767]: I0128 18:48:48.418511 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2" Jan 28 18:48:48 crc kubenswrapper[4767]: I0128 18:48:48.469703 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2" podStartSLOduration=3.469669782 podStartE2EDuration="3.469669782s" podCreationTimestamp="2026-01-28 18:48:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:48:48.461030572 +0000 UTC m=+1134.425213456" watchObservedRunningTime="2026-01-28 18:48:48.469669782 +0000 UTC m=+1134.433852656" Jan 28 18:48:48 crc kubenswrapper[4767]: I0128 18:48:48.810491 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bac6b5c2-50a2-4af9-a1d0-2542da258fe4" path="/var/lib/kubelet/pods/bac6b5c2-50a2-4af9-a1d0-2542da258fe4/volumes" Jan 28 18:48:48 crc kubenswrapper[4767]: I0128 18:48:48.811283 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb9ed350-6e31-4065-992e-e10644be1c7d" path="/var/lib/kubelet/pods/cb9ed350-6e31-4065-992e-e10644be1c7d/volumes" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.436696 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pbstb" 
event={"ID":"38cbf822-4dba-4db2-843a-05cc8133fe50","Type":"ContainerStarted","Data":"daa596200cd79711632fe9fa67c38b6ee3003b9229ff033adb4cc7d03e97e489"} Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.437444 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8554648995-pbstb" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.442746 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"bdc30b2c-2b3b-4e16-8a77-965490805677","Type":"ContainerStarted","Data":"b98b94a06ef5d72536b5c537b4776ef3ad5b2cf3dd8a00fa0b7cb6b477c91b06"} Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.442839 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"bdc30b2c-2b3b-4e16-8a77-965490805677","Type":"ContainerStarted","Data":"0ddb33dde753690397ebaeb2a7310311d093d1537d0213c61ca6c8a9e93ae9e5"} Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.477869 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8554648995-pbstb" podStartSLOduration=4.477830326 podStartE2EDuration="4.477830326s" podCreationTimestamp="2026-01-28 18:48:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:48:49.470332552 +0000 UTC m=+1135.434515446" watchObservedRunningTime="2026-01-28 18:48:49.477830326 +0000 UTC m=+1135.442013200" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.505534 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-ptkp8"] Jan 28 18:48:49 crc kubenswrapper[4767]: E0128 18:48:49.506232 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bac6b5c2-50a2-4af9-a1d0-2542da258fe4" containerName="dnsmasq-dns" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.506263 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="bac6b5c2-50a2-4af9-a1d0-2542da258fe4" containerName="dnsmasq-dns" Jan 28 18:48:49 crc kubenswrapper[4767]: E0128 18:48:49.506306 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb9ed350-6e31-4065-992e-e10644be1c7d" containerName="dnsmasq-dns" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.506314 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb9ed350-6e31-4065-992e-e10644be1c7d" containerName="dnsmasq-dns" Jan 28 18:48:49 crc kubenswrapper[4767]: E0128 18:48:49.506341 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb9ed350-6e31-4065-992e-e10644be1c7d" containerName="init" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.506349 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb9ed350-6e31-4065-992e-e10644be1c7d" containerName="init" Jan 28 18:48:49 crc kubenswrapper[4767]: E0128 18:48:49.506369 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bac6b5c2-50a2-4af9-a1d0-2542da258fe4" containerName="init" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.506377 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="bac6b5c2-50a2-4af9-a1d0-2542da258fe4" containerName="init" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.506615 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb9ed350-6e31-4065-992e-e10644be1c7d" containerName="dnsmasq-dns" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.506635 4767 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="bac6b5c2-50a2-4af9-a1d0-2542da258fe4" containerName="dnsmasq-dns" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.507632 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-ptkp8" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.511567 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.523830 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-ptkp8"] Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.534688 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=3.029219406 podStartE2EDuration="4.534661431s" podCreationTimestamp="2026-01-28 18:48:45 +0000 UTC" firstStartedPulling="2026-01-28 18:48:46.973558609 +0000 UTC m=+1132.937741483" lastFinishedPulling="2026-01-28 18:48:48.479000624 +0000 UTC m=+1134.443183508" observedRunningTime="2026-01-28 18:48:49.504242551 +0000 UTC m=+1135.468425455" watchObservedRunningTime="2026-01-28 18:48:49.534661431 +0000 UTC m=+1135.498844305" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.572246 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.573345 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.618560 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j992w\" (UniqueName: \"kubernetes.io/projected/a796f512-5b4c-4ba3-8e10-97e19818478f-kube-api-access-j992w\") pod \"root-account-create-update-ptkp8\" (UID: \"a796f512-5b4c-4ba3-8e10-97e19818478f\") " pod="openstack/root-account-create-update-ptkp8" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.619092 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a796f512-5b4c-4ba3-8e10-97e19818478f-operator-scripts\") pod \"root-account-create-update-ptkp8\" (UID: \"a796f512-5b4c-4ba3-8e10-97e19818478f\") " pod="openstack/root-account-create-update-ptkp8" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.661635 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.721354 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j992w\" (UniqueName: \"kubernetes.io/projected/a796f512-5b4c-4ba3-8e10-97e19818478f-kube-api-access-j992w\") pod \"root-account-create-update-ptkp8\" (UID: \"a796f512-5b4c-4ba3-8e10-97e19818478f\") " pod="openstack/root-account-create-update-ptkp8" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.721594 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a796f512-5b4c-4ba3-8e10-97e19818478f-operator-scripts\") pod \"root-account-create-update-ptkp8\" (UID: \"a796f512-5b4c-4ba3-8e10-97e19818478f\") " pod="openstack/root-account-create-update-ptkp8" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.723523 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/a796f512-5b4c-4ba3-8e10-97e19818478f-operator-scripts\") pod \"root-account-create-update-ptkp8\" (UID: \"a796f512-5b4c-4ba3-8e10-97e19818478f\") " pod="openstack/root-account-create-update-ptkp8" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.757202 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j992w\" (UniqueName: \"kubernetes.io/projected/a796f512-5b4c-4ba3-8e10-97e19818478f-kube-api-access-j992w\") pod \"root-account-create-update-ptkp8\" (UID: \"a796f512-5b4c-4ba3-8e10-97e19818478f\") " pod="openstack/root-account-create-update-ptkp8" Jan 28 18:48:49 crc kubenswrapper[4767]: I0128 18:48:49.841896 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-ptkp8" Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.342631 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-ptkp8"] Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.453475 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-ptkp8" event={"ID":"a796f512-5b4c-4ba3-8e10-97e19818478f","Type":"ContainerStarted","Data":"7e00f79e7d5ab930154572e2f979ceca3312bd6b78cb8c19ac05a9be01843d85"} Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.454093 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.548332 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.767700 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-f0b5-account-create-update-lgdx8"] Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.769563 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f0b5-account-create-update-lgdx8" Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.772645 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.776861 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f0b5-account-create-update-lgdx8"] Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.848922 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15-operator-scripts\") pod \"keystone-f0b5-account-create-update-lgdx8\" (UID: \"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15\") " pod="openstack/keystone-f0b5-account-create-update-lgdx8" Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.849298 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-822qn\" (UniqueName: \"kubernetes.io/projected/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15-kube-api-access-822qn\") pod \"keystone-f0b5-account-create-update-lgdx8\" (UID: \"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15\") " pod="openstack/keystone-f0b5-account-create-update-lgdx8" Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.951504 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-822qn\" (UniqueName: \"kubernetes.io/projected/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15-kube-api-access-822qn\") pod \"keystone-f0b5-account-create-update-lgdx8\" (UID: \"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15\") " pod="openstack/keystone-f0b5-account-create-update-lgdx8" Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.951667 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15-operator-scripts\") pod \"keystone-f0b5-account-create-update-lgdx8\" (UID: \"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15\") " pod="openstack/keystone-f0b5-account-create-update-lgdx8" Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.952732 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15-operator-scripts\") pod \"keystone-f0b5-account-create-update-lgdx8\" (UID: \"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15\") " pod="openstack/keystone-f0b5-account-create-update-lgdx8" Jan 28 18:48:50 crc kubenswrapper[4767]: I0128 18:48:50.975184 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-822qn\" (UniqueName: \"kubernetes.io/projected/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15-kube-api-access-822qn\") pod \"keystone-f0b5-account-create-update-lgdx8\" (UID: \"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15\") " pod="openstack/keystone-f0b5-account-create-update-lgdx8" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.099412 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f0b5-account-create-update-lgdx8" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.197667 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-vfqm5"] Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.199449 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-vfqm5" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.214798 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-vfqm5"] Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.258950 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z4h2\" (UniqueName: \"kubernetes.io/projected/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4-kube-api-access-5z4h2\") pod \"placement-db-create-vfqm5\" (UID: \"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4\") " pod="openstack/placement-db-create-vfqm5" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.262768 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4-operator-scripts\") pod \"placement-db-create-vfqm5\" (UID: \"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4\") " pod="openstack/placement-db-create-vfqm5" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.309340 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-af6e-account-create-update-6t8hx"] Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.314902 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-af6e-account-create-update-6t8hx" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.320553 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.325548 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-af6e-account-create-update-6t8hx"] Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.365979 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4-operator-scripts\") pod \"placement-db-create-vfqm5\" (UID: \"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4\") " pod="openstack/placement-db-create-vfqm5" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.366173 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5z4h2\" (UniqueName: \"kubernetes.io/projected/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4-kube-api-access-5z4h2\") pod \"placement-db-create-vfqm5\" (UID: \"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4\") " pod="openstack/placement-db-create-vfqm5" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.372788 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4-operator-scripts\") pod \"placement-db-create-vfqm5\" (UID: \"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4\") " pod="openstack/placement-db-create-vfqm5" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.390556 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z4h2\" (UniqueName: \"kubernetes.io/projected/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4-kube-api-access-5z4h2\") pod \"placement-db-create-vfqm5\" (UID: \"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4\") " pod="openstack/placement-db-create-vfqm5" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.467288 4767 generic.go:334] "Generic (PLEG): container finished" podID="a796f512-5b4c-4ba3-8e10-97e19818478f" 
containerID="389f29fcfb4135d774ea8cab63fb8bfb5c6165befbe491f4aa19dae103cdc380" exitCode=0 Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.467591 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-ptkp8" event={"ID":"a796f512-5b4c-4ba3-8e10-97e19818478f","Type":"ContainerDied","Data":"389f29fcfb4135d774ea8cab63fb8bfb5c6165befbe491f4aa19dae103cdc380"} Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.469342 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7b7g\" (UniqueName: \"kubernetes.io/projected/d7f6bed0-23bc-4917-a2d1-2e6439ef6205-kube-api-access-v7b7g\") pod \"placement-af6e-account-create-update-6t8hx\" (UID: \"d7f6bed0-23bc-4917-a2d1-2e6439ef6205\") " pod="openstack/placement-af6e-account-create-update-6t8hx" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.469454 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7f6bed0-23bc-4917-a2d1-2e6439ef6205-operator-scripts\") pod \"placement-af6e-account-create-update-6t8hx\" (UID: \"d7f6bed0-23bc-4917-a2d1-2e6439ef6205\") " pod="openstack/placement-af6e-account-create-update-6t8hx" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.539178 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-vfqm5" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.571764 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7b7g\" (UniqueName: \"kubernetes.io/projected/d7f6bed0-23bc-4917-a2d1-2e6439ef6205-kube-api-access-v7b7g\") pod \"placement-af6e-account-create-update-6t8hx\" (UID: \"d7f6bed0-23bc-4917-a2d1-2e6439ef6205\") " pod="openstack/placement-af6e-account-create-update-6t8hx" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.571862 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7f6bed0-23bc-4917-a2d1-2e6439ef6205-operator-scripts\") pod \"placement-af6e-account-create-update-6t8hx\" (UID: \"d7f6bed0-23bc-4917-a2d1-2e6439ef6205\") " pod="openstack/placement-af6e-account-create-update-6t8hx" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.576271 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7f6bed0-23bc-4917-a2d1-2e6439ef6205-operator-scripts\") pod \"placement-af6e-account-create-update-6t8hx\" (UID: \"d7f6bed0-23bc-4917-a2d1-2e6439ef6205\") " pod="openstack/placement-af6e-account-create-update-6t8hx" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.602140 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7b7g\" (UniqueName: \"kubernetes.io/projected/d7f6bed0-23bc-4917-a2d1-2e6439ef6205-kube-api-access-v7b7g\") pod \"placement-af6e-account-create-update-6t8hx\" (UID: \"d7f6bed0-23bc-4917-a2d1-2e6439ef6205\") " pod="openstack/placement-af6e-account-create-update-6t8hx" Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.635986 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-f0b5-account-create-update-lgdx8"] Jan 28 18:48:51 crc kubenswrapper[4767]: W0128 18:48:51.642290 4767 manager.go:1169] Failed to process watch event {EventType:0 
Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.650592 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-af6e-account-create-update-6t8hx"
Jan 28 18:48:51 crc kubenswrapper[4767]: I0128 18:48:51.938649 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-af6e-account-create-update-6t8hx"]
Jan 28 18:48:52 crc kubenswrapper[4767]: I0128 18:48:52.045780 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-vfqm5"]
Jan 28 18:48:52 crc kubenswrapper[4767]: W0128 18:48:52.066987 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29c6d6d5_bad8_40df_bd2b_90fb7c37bcd4.slice/crio-d15d599cd788b40dc9e04bfa93593ce52ce731d32419a52054b0fdfcbc107781 WatchSource:0}: Error finding container d15d599cd788b40dc9e04bfa93593ce52ce731d32419a52054b0fdfcbc107781: Status 404 returned error can't find the container with id d15d599cd788b40dc9e04bfa93593ce52ce731d32419a52054b0fdfcbc107781
Jan 28 18:48:52 crc kubenswrapper[4767]: I0128 18:48:52.476062 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-af6e-account-create-update-6t8hx" event={"ID":"d7f6bed0-23bc-4917-a2d1-2e6439ef6205","Type":"ContainerStarted","Data":"6df3c79b6ede6f6afc17c942dba2949f2f9ce59e1818276d4d96ce8a687d4de5"}
Jan 28 18:48:52 crc kubenswrapper[4767]: I0128 18:48:52.477762 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f0b5-account-create-update-lgdx8" event={"ID":"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15","Type":"ContainerStarted","Data":"f03428e8a4148ed782d6bc54448b718bf7bbfa3ca45e3928ae8ef1851939171e"}
Jan 28 18:48:52 crc kubenswrapper[4767]: I0128 18:48:52.477797 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f0b5-account-create-update-lgdx8" event={"ID":"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15","Type":"ContainerStarted","Data":"a6a25526b94f01f854d0244bf81726f2826b4336084f957aa99f5063fe026db6"}
Jan 28 18:48:52 crc kubenswrapper[4767]: I0128 18:48:52.479327 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-vfqm5" event={"ID":"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4","Type":"ContainerStarted","Data":"d15d599cd788b40dc9e04bfa93593ce52ce731d32419a52054b0fdfcbc107781"}
Jan 28 18:48:52 crc kubenswrapper[4767]: I0128 18:48:52.503361 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-f0b5-account-create-update-lgdx8" podStartSLOduration=2.50333238 podStartE2EDuration="2.50333238s" podCreationTimestamp="2026-01-28 18:48:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:48:52.495806035 +0000 UTC m=+1138.459988919" watchObservedRunningTime="2026-01-28 18:48:52.50333238 +0000 UTC m=+1138.467515254"
Jan 28 18:48:52 crc kubenswrapper[4767]: I0128 18:48:52.817621 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-ptkp8"
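
[editor's annotation] The generic.go "Generic (PLEG): container finished" and kubelet.go "SyncLoop (PLEG): event for pod" pairs throughout this excerpt are the pod lifecycle event generator feeding the sync loop: a relist notices a container state change, and the loop re-syncs just the affected pod. A schematic handler; the struct fields mirror the event={...} payloads in the log, but the handler bodies are invented for illustration:

    package main

    import "fmt"

    // PodLifecycleEvent mirrors the event={...} payloads in the log.
    type PodLifecycleEvent struct {
    	ID   string // pod UID
    	Type string // "ContainerStarted", "ContainerDied", ...
    	Data string // container (or sandbox) ID
    }

    func syncLoopHandle(pod string, ev PodLifecycleEvent) {
    	switch ev.Type {
    	case "ContainerDied":
    		fmt.Printf("pod %s: container %s finished; record exit, schedule cleanup\n", pod, ev.Data)
    	case "ContainerStarted":
    		fmt.Printf("pod %s: container %s started; arm its probes\n", pod, ev.Data)
    	}
    }

    func main() {
    	syncLoopHandle("openstack/keystone-f0b5-account-create-update-lgdx8", PodLifecycleEvent{
    		ID:   "6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15",
    		Type: "ContainerStarted",
    		Data: "f03428e8a4148ed782d6bc54448b718bf7bbfa3ca45e3928ae8ef1851939171e",
    	})
    }

The manager.go:1169 "Status 404" warnings just above are a related but benign race: cAdvisor sees the cgroup watch event before the container is registered, fails the lookup, and simply logs and moves on.
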
Need to start a new one" pod="openstack/root-account-create-update-ptkp8" Jan 28 18:48:52 crc kubenswrapper[4767]: I0128 18:48:52.910733 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j992w\" (UniqueName: \"kubernetes.io/projected/a796f512-5b4c-4ba3-8e10-97e19818478f-kube-api-access-j992w\") pod \"a796f512-5b4c-4ba3-8e10-97e19818478f\" (UID: \"a796f512-5b4c-4ba3-8e10-97e19818478f\") " Jan 28 18:48:52 crc kubenswrapper[4767]: I0128 18:48:52.910916 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a796f512-5b4c-4ba3-8e10-97e19818478f-operator-scripts\") pod \"a796f512-5b4c-4ba3-8e10-97e19818478f\" (UID: \"a796f512-5b4c-4ba3-8e10-97e19818478f\") " Jan 28 18:48:52 crc kubenswrapper[4767]: I0128 18:48:52.913145 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a796f512-5b4c-4ba3-8e10-97e19818478f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a796f512-5b4c-4ba3-8e10-97e19818478f" (UID: "a796f512-5b4c-4ba3-8e10-97e19818478f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:48:52 crc kubenswrapper[4767]: I0128 18:48:52.922251 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a796f512-5b4c-4ba3-8e10-97e19818478f-kube-api-access-j992w" (OuterVolumeSpecName: "kube-api-access-j992w") pod "a796f512-5b4c-4ba3-8e10-97e19818478f" (UID: "a796f512-5b4c-4ba3-8e10-97e19818478f"). InnerVolumeSpecName "kube-api-access-j992w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.013355 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j992w\" (UniqueName: \"kubernetes.io/projected/a796f512-5b4c-4ba3-8e10-97e19818478f-kube-api-access-j992w\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.013412 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a796f512-5b4c-4ba3-8e10-97e19818478f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.117933 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.230163 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-rclv2"] Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.230481 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2" podUID="2a188e31-b9bb-41e4-93f0-ed4a9e538570" containerName="dnsmasq-dns" containerID="cri-o://ba7334b5ae8160a28d37d95f1d559c7e8eb9efe99a9d8f2f6b59627de4732467" gracePeriod=10 Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.232347 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2" Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.263819 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-tbfrc"] Jan 28 18:48:53 crc kubenswrapper[4767]: E0128 18:48:53.264485 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a796f512-5b4c-4ba3-8e10-97e19818478f" containerName="mariadb-account-create-update" Jan 28 18:48:53 crc 
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.265326 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a796f512-5b4c-4ba3-8e10-97e19818478f" containerName="mariadb-account-create-update"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.266769 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.297611 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-tbfrc"]
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.320548 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.320642 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-config\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.320666 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.320750 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.320796 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z2kv5\" (UniqueName: \"kubernetes.io/projected/542e241a-df61-4376-8a81-8b82452979d0-kube-api-access-z2kv5\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.422507 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.422563 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-config\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.422582 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.422631 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.422649 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z2kv5\" (UniqueName: \"kubernetes.io/projected/542e241a-df61-4376-8a81-8b82452979d0-kube-api-access-z2kv5\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.423819 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-ovsdbserver-sb\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.424375 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-config\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.424909 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-dns-svc\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.425534 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-ovsdbserver-nb\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.454433 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z2kv5\" (UniqueName: \"kubernetes.io/projected/542e241a-df61-4376-8a81-8b82452979d0-kube-api-access-z2kv5\") pod \"dnsmasq-dns-b8fbc5445-tbfrc\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.505178 4767 generic.go:334] "Generic (PLEG): container finished" podID="6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15" containerID="f03428e8a4148ed782d6bc54448b718bf7bbfa3ca45e3928ae8ef1851939171e" exitCode=0
Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.505292 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f0b5-account-create-update-lgdx8" event={"ID":"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15","Type":"ContainerDied","Data":"f03428e8a4148ed782d6bc54448b718bf7bbfa3ca45e3928ae8ef1851939171e"}
event={"ID":"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15","Type":"ContainerDied","Data":"f03428e8a4148ed782d6bc54448b718bf7bbfa3ca45e3928ae8ef1851939171e"} Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.510112 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-ptkp8" event={"ID":"a796f512-5b4c-4ba3-8e10-97e19818478f","Type":"ContainerDied","Data":"7e00f79e7d5ab930154572e2f979ceca3312bd6b78cb8c19ac05a9be01843d85"} Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.510168 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e00f79e7d5ab930154572e2f979ceca3312bd6b78cb8c19ac05a9be01843d85" Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.510268 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-ptkp8" Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.524342 4767 generic.go:334] "Generic (PLEG): container finished" podID="29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4" containerID="f663a8faba36af3ac56d6aa0cb4d23e7911f0260f623f384e9d59a54b4f7de22" exitCode=0 Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.524446 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-vfqm5" event={"ID":"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4","Type":"ContainerDied","Data":"f663a8faba36af3ac56d6aa0cb4d23e7911f0260f623f384e9d59a54b4f7de22"} Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.526772 4767 generic.go:334] "Generic (PLEG): container finished" podID="d7f6bed0-23bc-4917-a2d1-2e6439ef6205" containerID="67a62af277ea081937b0dfaff2d0c59fe92e22b9640f4b7fcfa05645a22e13d0" exitCode=0 Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.526849 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-af6e-account-create-update-6t8hx" event={"ID":"d7f6bed0-23bc-4917-a2d1-2e6439ef6205","Type":"ContainerDied","Data":"67a62af277ea081937b0dfaff2d0c59fe92e22b9640f4b7fcfa05645a22e13d0"} Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.537688 4767 generic.go:334] "Generic (PLEG): container finished" podID="2a188e31-b9bb-41e4-93f0-ed4a9e538570" containerID="ba7334b5ae8160a28d37d95f1d559c7e8eb9efe99a9d8f2f6b59627de4732467" exitCode=0 Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.537765 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2" event={"ID":"2a188e31-b9bb-41e4-93f0-ed4a9e538570","Type":"ContainerDied","Data":"ba7334b5ae8160a28d37d95f1d559c7e8eb9efe99a9d8f2f6b59627de4732467"} Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.594664 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.799994 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2" Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.930975 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qd988\" (UniqueName: \"kubernetes.io/projected/2a188e31-b9bb-41e4-93f0-ed4a9e538570-kube-api-access-qd988\") pod \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.931141 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-config\") pod \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.931432 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-ovsdbserver-nb\") pod \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.931517 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-dns-svc\") pod \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\" (UID: \"2a188e31-b9bb-41e4-93f0-ed4a9e538570\") " Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.939416 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a188e31-b9bb-41e4-93f0-ed4a9e538570-kube-api-access-qd988" (OuterVolumeSpecName: "kube-api-access-qd988") pod "2a188e31-b9bb-41e4-93f0-ed4a9e538570" (UID: "2a188e31-b9bb-41e4-93f0-ed4a9e538570"). InnerVolumeSpecName "kube-api-access-qd988". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:48:53 crc kubenswrapper[4767]: I0128 18:48:53.985273 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "2a188e31-b9bb-41e4-93f0-ed4a9e538570" (UID: "2a188e31-b9bb-41e4-93f0-ed4a9e538570"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.001321 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-config" (OuterVolumeSpecName: "config") pod "2a188e31-b9bb-41e4-93f0-ed4a9e538570" (UID: "2a188e31-b9bb-41e4-93f0-ed4a9e538570"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.013043 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "2a188e31-b9bb-41e4-93f0-ed4a9e538570" (UID: "2a188e31-b9bb-41e4-93f0-ed4a9e538570"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.034747 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.035168 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.035187 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qd988\" (UniqueName: \"kubernetes.io/projected/2a188e31-b9bb-41e4-93f0-ed4a9e538570-kube-api-access-qd988\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.035227 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2a188e31-b9bb-41e4-93f0-ed4a9e538570-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.107952 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-tbfrc"] Jan 28 18:48:54 crc kubenswrapper[4767]: W0128 18:48:54.118162 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod542e241a_df61_4376_8a81_8b82452979d0.slice/crio-ef6a6f76f119a587c119bf42dcb3bd954d940925c68701bfdc82681e2c802e16 WatchSource:0}: Error finding container ef6a6f76f119a587c119bf42dcb3bd954d940925c68701bfdc82681e2c802e16: Status 404 returned error can't find the container with id ef6a6f76f119a587c119bf42dcb3bd954d940925c68701bfdc82681e2c802e16 Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.429674 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Jan 28 18:48:54 crc kubenswrapper[4767]: E0128 18:48:54.431108 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a188e31-b9bb-41e4-93f0-ed4a9e538570" containerName="init" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.431133 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a188e31-b9bb-41e4-93f0-ed4a9e538570" containerName="init" Jan 28 18:48:54 crc kubenswrapper[4767]: E0128 18:48:54.431149 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a188e31-b9bb-41e4-93f0-ed4a9e538570" containerName="dnsmasq-dns" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.431156 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a188e31-b9bb-41e4-93f0-ed4a9e538570" containerName="dnsmasq-dns" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.431707 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a188e31-b9bb-41e4-93f0-ed4a9e538570" containerName="dnsmasq-dns" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.449885 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.452566 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.455308 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-dt6w2" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.455852 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.456051 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.457643 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.545766 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ds5hj\" (UniqueName: \"kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-kube-api-access-ds5hj\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.545852 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/c482494c-49e9-4314-a836-a7bea8f6f8c4-cache\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.545973 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/c482494c-49e9-4314-a836-a7bea8f6f8c4-lock\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.546056 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.546107 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.546138 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c482494c-49e9-4314-a836-a7bea8f6f8c4-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.548912 4767 generic.go:334] "Generic (PLEG): container finished" podID="542e241a-df61-4376-8a81-8b82452979d0" containerID="c6f9d5726aebc7dd73f0378e37e94829a8748b377b508453c1cf2243439643c7" exitCode=0 Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.549020 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" 
event={"ID":"542e241a-df61-4376-8a81-8b82452979d0","Type":"ContainerDied","Data":"c6f9d5726aebc7dd73f0378e37e94829a8748b377b508453c1cf2243439643c7"} Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.549054 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" event={"ID":"542e241a-df61-4376-8a81-8b82452979d0","Type":"ContainerStarted","Data":"ef6a6f76f119a587c119bf42dcb3bd954d940925c68701bfdc82681e2c802e16"} Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.554857 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2" event={"ID":"2a188e31-b9bb-41e4-93f0-ed4a9e538570","Type":"ContainerDied","Data":"b9e79ab172b6d42d3105139058f837c9a690bb3fd5f40a25eb8e0309b217ba5f"} Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.554960 4767 scope.go:117] "RemoveContainer" containerID="ba7334b5ae8160a28d37d95f1d559c7e8eb9efe99a9d8f2f6b59627de4732467" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.555026 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5bf47b49b7-rclv2" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.588287 4767 scope.go:117] "RemoveContainer" containerID="d4f199c7368c8fd178500ac71fe6460c23b8542bc6459ef1bec525bb6c4c5eb8" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.607854 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-rclv2"] Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.614062 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5bf47b49b7-rclv2"] Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.647697 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ds5hj\" (UniqueName: \"kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-kube-api-access-ds5hj\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.648323 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/c482494c-49e9-4314-a836-a7bea8f6f8c4-cache\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.648496 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/c482494c-49e9-4314-a836-a7bea8f6f8c4-lock\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.648638 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.648675 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.649119 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c482494c-49e9-4314-a836-a7bea8f6f8c4-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.649740 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.649781 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/c482494c-49e9-4314-a836-a7bea8f6f8c4-cache\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.650325 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/c482494c-49e9-4314-a836-a7bea8f6f8c4-lock\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:54 crc kubenswrapper[4767]: E0128 18:48:54.650578 4767 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 18:48:54 crc kubenswrapper[4767]: E0128 18:48:54.650608 4767 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 18:48:54 crc kubenswrapper[4767]: E0128 18:48:54.650676 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift podName:c482494c-49e9-4314-a836-a7bea8f6f8c4 nodeName:}" failed. No retries permitted until 2026-01-28 18:48:55.150655159 +0000 UTC m=+1141.114838093 (durationBeforeRetry 500ms). 
Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.656089 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c482494c-49e9-4314-a836-a7bea8f6f8c4-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0"
Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.669625 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ds5hj\" (UniqueName: \"kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-kube-api-access-ds5hj\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0"
Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.684071 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0"
Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.848932 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a188e31-b9bb-41e4-93f0-ed4a9e538570" path="/var/lib/kubelet/pods/2a188e31-b9bb-41e4-93f0-ed4a9e538570/volumes"
Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.885979 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-vfqm5"
Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.953569 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4-operator-scripts\") pod \"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4\" (UID: \"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4\") "
Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.953776 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5z4h2\" (UniqueName: \"kubernetes.io/projected/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4-kube-api-access-5z4h2\") pod \"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4\" (UID: \"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4\") "
Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.954951 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4" (UID: "29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:48:54 crc kubenswrapper[4767]: I0128 18:48:54.977354 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4-kube-api-access-5z4h2" (OuterVolumeSpecName: "kube-api-access-5z4h2") pod "29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4" (UID: "29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4"). InnerVolumeSpecName "kube-api-access-5z4h2". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.056065 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.056640 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5z4h2\" (UniqueName: \"kubernetes.io/projected/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4-kube-api-access-5z4h2\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.159087 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:55 crc kubenswrapper[4767]: E0128 18:48:55.159434 4767 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 18:48:55 crc kubenswrapper[4767]: E0128 18:48:55.159453 4767 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 18:48:55 crc kubenswrapper[4767]: E0128 18:48:55.159517 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift podName:c482494c-49e9-4314-a836-a7bea8f6f8c4 nodeName:}" failed. No retries permitted until 2026-01-28 18:48:56.15949358 +0000 UTC m=+1142.123676454 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift") pod "swift-storage-0" (UID: "c482494c-49e9-4314-a836-a7bea8f6f8c4") : configmap "swift-ring-files" not found Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.203103 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-af6e-account-create-update-6t8hx" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.260611 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7f6bed0-23bc-4917-a2d1-2e6439ef6205-operator-scripts\") pod \"d7f6bed0-23bc-4917-a2d1-2e6439ef6205\" (UID: \"d7f6bed0-23bc-4917-a2d1-2e6439ef6205\") " Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.260676 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7b7g\" (UniqueName: \"kubernetes.io/projected/d7f6bed0-23bc-4917-a2d1-2e6439ef6205-kube-api-access-v7b7g\") pod \"d7f6bed0-23bc-4917-a2d1-2e6439ef6205\" (UID: \"d7f6bed0-23bc-4917-a2d1-2e6439ef6205\") " Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.261376 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7f6bed0-23bc-4917-a2d1-2e6439ef6205-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d7f6bed0-23bc-4917-a2d1-2e6439ef6205" (UID: "d7f6bed0-23bc-4917-a2d1-2e6439ef6205"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.262225 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7f6bed0-23bc-4917-a2d1-2e6439ef6205-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.266007 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7f6bed0-23bc-4917-a2d1-2e6439ef6205-kube-api-access-v7b7g" (OuterVolumeSpecName: "kube-api-access-v7b7g") pod "d7f6bed0-23bc-4917-a2d1-2e6439ef6205" (UID: "d7f6bed0-23bc-4917-a2d1-2e6439ef6205"). InnerVolumeSpecName "kube-api-access-v7b7g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.352598 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-f0b5-account-create-update-lgdx8" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.363933 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7b7g\" (UniqueName: \"kubernetes.io/projected/d7f6bed0-23bc-4917-a2d1-2e6439ef6205-kube-api-access-v7b7g\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.465760 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15-operator-scripts\") pod \"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15\" (UID: \"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15\") " Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.465884 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-822qn\" (UniqueName: \"kubernetes.io/projected/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15-kube-api-access-822qn\") pod \"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15\" (UID: \"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15\") " Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.466434 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15" (UID: "6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.466738 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.471696 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15-kube-api-access-822qn" (OuterVolumeSpecName: "kube-api-access-822qn") pod "6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15" (UID: "6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15"). InnerVolumeSpecName "kube-api-access-822qn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.563305 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-f0b5-account-create-update-lgdx8" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.564339 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-f0b5-account-create-update-lgdx8" event={"ID":"6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15","Type":"ContainerDied","Data":"a6a25526b94f01f854d0244bf81726f2826b4336084f957aa99f5063fe026db6"} Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.564401 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6a25526b94f01f854d0244bf81726f2826b4336084f957aa99f5063fe026db6" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.566671 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-vfqm5" event={"ID":"29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4","Type":"ContainerDied","Data":"d15d599cd788b40dc9e04bfa93593ce52ce731d32419a52054b0fdfcbc107781"} Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.566717 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d15d599cd788b40dc9e04bfa93593ce52ce731d32419a52054b0fdfcbc107781" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.566786 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-vfqm5" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.569541 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-822qn\" (UniqueName: \"kubernetes.io/projected/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15-kube-api-access-822qn\") on node \"crc\" DevicePath \"\"" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.571359 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" event={"ID":"542e241a-df61-4376-8a81-8b82452979d0","Type":"ContainerStarted","Data":"bdd293df75c1a9231891f2ec61ba825228b2169f3e828744b0e0f6d529ebca2d"} Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.571494 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.573370 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-af6e-account-create-update-6t8hx" event={"ID":"d7f6bed0-23bc-4917-a2d1-2e6439ef6205","Type":"ContainerDied","Data":"6df3c79b6ede6f6afc17c942dba2949f2f9ce59e1818276d4d96ce8a687d4de5"} Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.573422 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6df3c79b6ede6f6afc17c942dba2949f2f9ce59e1818276d4d96ce8a687d4de5" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.573466 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-af6e-account-create-update-6t8hx" Jan 28 18:48:55 crc kubenswrapper[4767]: I0128 18:48:55.607648 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" podStartSLOduration=2.607612774 podStartE2EDuration="2.607612774s" podCreationTimestamp="2026-01-28 18:48:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:48:55.589696585 +0000 UTC m=+1141.553879459" watchObservedRunningTime="2026-01-28 18:48:55.607612774 +0000 UTC m=+1141.571795648" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.187327 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:56 crc kubenswrapper[4767]: E0128 18:48:56.187651 4767 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 18:48:56 crc kubenswrapper[4767]: E0128 18:48:56.188029 4767 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 18:48:56 crc kubenswrapper[4767]: E0128 18:48:56.188131 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift podName:c482494c-49e9-4314-a836-a7bea8f6f8c4 nodeName:}" failed. No retries permitted until 2026-01-28 18:48:58.188096443 +0000 UTC m=+1144.152279327 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift") pod "swift-storage-0" (UID: "c482494c-49e9-4314-a836-a7bea8f6f8c4") : configmap "swift-ring-files" not found Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.198480 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8554648995-pbstb" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.433748 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-dhkzb"] Jan 28 18:48:56 crc kubenswrapper[4767]: E0128 18:48:56.434119 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7f6bed0-23bc-4917-a2d1-2e6439ef6205" containerName="mariadb-account-create-update" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.434133 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7f6bed0-23bc-4917-a2d1-2e6439ef6205" containerName="mariadb-account-create-update" Jan 28 18:48:56 crc kubenswrapper[4767]: E0128 18:48:56.434149 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4" containerName="mariadb-database-create" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.434155 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4" containerName="mariadb-database-create" Jan 28 18:48:56 crc kubenswrapper[4767]: E0128 18:48:56.434166 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15" containerName="mariadb-account-create-update" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.434172 4767 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15" containerName="mariadb-account-create-update" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.434366 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15" containerName="mariadb-account-create-update" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.434381 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7f6bed0-23bc-4917-a2d1-2e6439ef6205" containerName="mariadb-account-create-update" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.434399 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4" containerName="mariadb-database-create" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.434908 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-dhkzb" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.450897 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-dhkzb"] Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.498083 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwtvc\" (UniqueName: \"kubernetes.io/projected/fe833b02-be1c-45d7-88b0-8c18d9471631-kube-api-access-wwtvc\") pod \"glance-db-create-dhkzb\" (UID: \"fe833b02-be1c-45d7-88b0-8c18d9471631\") " pod="openstack/glance-db-create-dhkzb" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.499130 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe833b02-be1c-45d7-88b0-8c18d9471631-operator-scripts\") pod \"glance-db-create-dhkzb\" (UID: \"fe833b02-be1c-45d7-88b0-8c18d9471631\") " pod="openstack/glance-db-create-dhkzb" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.542003 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-1668-account-create-update-7rp66"] Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.543852 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-1668-account-create-update-7rp66" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.553195 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.555784 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-1668-account-create-update-7rp66"] Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.609607 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c29bdd1f-3c65-4fbc-9365-b7f446afc233-operator-scripts\") pod \"glance-1668-account-create-update-7rp66\" (UID: \"c29bdd1f-3c65-4fbc-9365-b7f446afc233\") " pod="openstack/glance-1668-account-create-update-7rp66" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.609733 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwtvc\" (UniqueName: \"kubernetes.io/projected/fe833b02-be1c-45d7-88b0-8c18d9471631-kube-api-access-wwtvc\") pod \"glance-db-create-dhkzb\" (UID: \"fe833b02-be1c-45d7-88b0-8c18d9471631\") " pod="openstack/glance-db-create-dhkzb" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.609762 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9fjs\" (UniqueName: \"kubernetes.io/projected/c29bdd1f-3c65-4fbc-9365-b7f446afc233-kube-api-access-c9fjs\") pod \"glance-1668-account-create-update-7rp66\" (UID: \"c29bdd1f-3c65-4fbc-9365-b7f446afc233\") " pod="openstack/glance-1668-account-create-update-7rp66" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.609855 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe833b02-be1c-45d7-88b0-8c18d9471631-operator-scripts\") pod \"glance-db-create-dhkzb\" (UID: \"fe833b02-be1c-45d7-88b0-8c18d9471631\") " pod="openstack/glance-db-create-dhkzb" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.611128 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe833b02-be1c-45d7-88b0-8c18d9471631-operator-scripts\") pod \"glance-db-create-dhkzb\" (UID: \"fe833b02-be1c-45d7-88b0-8c18d9471631\") " pod="openstack/glance-db-create-dhkzb" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.633118 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwtvc\" (UniqueName: \"kubernetes.io/projected/fe833b02-be1c-45d7-88b0-8c18d9471631-kube-api-access-wwtvc\") pod \"glance-db-create-dhkzb\" (UID: \"fe833b02-be1c-45d7-88b0-8c18d9471631\") " pod="openstack/glance-db-create-dhkzb" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.711819 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c29bdd1f-3c65-4fbc-9365-b7f446afc233-operator-scripts\") pod \"glance-1668-account-create-update-7rp66\" (UID: \"c29bdd1f-3c65-4fbc-9365-b7f446afc233\") " pod="openstack/glance-1668-account-create-update-7rp66" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.711960 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9fjs\" (UniqueName: \"kubernetes.io/projected/c29bdd1f-3c65-4fbc-9365-b7f446afc233-kube-api-access-c9fjs\") pod 
\"glance-1668-account-create-update-7rp66\" (UID: \"c29bdd1f-3c65-4fbc-9365-b7f446afc233\") " pod="openstack/glance-1668-account-create-update-7rp66" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.712648 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c29bdd1f-3c65-4fbc-9365-b7f446afc233-operator-scripts\") pod \"glance-1668-account-create-update-7rp66\" (UID: \"c29bdd1f-3c65-4fbc-9365-b7f446afc233\") " pod="openstack/glance-1668-account-create-update-7rp66" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.735033 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9fjs\" (UniqueName: \"kubernetes.io/projected/c29bdd1f-3c65-4fbc-9365-b7f446afc233-kube-api-access-c9fjs\") pod \"glance-1668-account-create-update-7rp66\" (UID: \"c29bdd1f-3c65-4fbc-9365-b7f446afc233\") " pod="openstack/glance-1668-account-create-update-7rp66" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.758126 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-dhkzb" Jan 28 18:48:56 crc kubenswrapper[4767]: I0128 18:48:56.870160 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-1668-account-create-update-7rp66" Jan 28 18:48:57 crc kubenswrapper[4767]: I0128 18:48:57.276335 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-dhkzb"] Jan 28 18:48:57 crc kubenswrapper[4767]: W0128 18:48:57.280810 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfe833b02_be1c_45d7_88b0_8c18d9471631.slice/crio-0c99dbbbd219bd1347bba242fd93f9972a63a0b3321bf57f67f5c5e0becc0449 WatchSource:0}: Error finding container 0c99dbbbd219bd1347bba242fd93f9972a63a0b3321bf57f67f5c5e0becc0449: Status 404 returned error can't find the container with id 0c99dbbbd219bd1347bba242fd93f9972a63a0b3321bf57f67f5c5e0becc0449 Jan 28 18:48:57 crc kubenswrapper[4767]: W0128 18:48:57.451037 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc29bdd1f_3c65_4fbc_9365_b7f446afc233.slice/crio-27264d64b7d8faf1d6223137aea0f82d6e0fdd79778a5320481dfdd4d59582fc WatchSource:0}: Error finding container 27264d64b7d8faf1d6223137aea0f82d6e0fdd79778a5320481dfdd4d59582fc: Status 404 returned error can't find the container with id 27264d64b7d8faf1d6223137aea0f82d6e0fdd79778a5320481dfdd4d59582fc Jan 28 18:48:57 crc kubenswrapper[4767]: I0128 18:48:57.453717 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-1668-account-create-update-7rp66"] Jan 28 18:48:57 crc kubenswrapper[4767]: I0128 18:48:57.596501 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-dhkzb" event={"ID":"fe833b02-be1c-45d7-88b0-8c18d9471631","Type":"ContainerStarted","Data":"2156592dafc87c9c01fb56a217996f56bdd0917a97377e6ad61bbc59b08f1669"} Jan 28 18:48:57 crc kubenswrapper[4767]: I0128 18:48:57.596552 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-dhkzb" event={"ID":"fe833b02-be1c-45d7-88b0-8c18d9471631","Type":"ContainerStarted","Data":"0c99dbbbd219bd1347bba242fd93f9972a63a0b3321bf57f67f5c5e0becc0449"} Jan 28 18:48:57 crc kubenswrapper[4767]: I0128 18:48:57.598329 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1668-account-create-update-7rp66" 
event={"ID":"c29bdd1f-3c65-4fbc-9365-b7f446afc233","Type":"ContainerStarted","Data":"27264d64b7d8faf1d6223137aea0f82d6e0fdd79778a5320481dfdd4d59582fc"} Jan 28 18:48:57 crc kubenswrapper[4767]: I0128 18:48:57.620401 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-dhkzb" podStartSLOduration=1.620375171 podStartE2EDuration="1.620375171s" podCreationTimestamp="2026-01-28 18:48:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:48:57.614782407 +0000 UTC m=+1143.578965291" watchObservedRunningTime="2026-01-28 18:48:57.620375171 +0000 UTC m=+1143.584558045" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.092921 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-ptkp8"] Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.102971 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-ptkp8"] Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.162101 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-44ggf"] Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.165975 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-44ggf" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.169534 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.179841 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-44ggf"] Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.244700 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-698bn\" (UniqueName: \"kubernetes.io/projected/ef954606-bfec-4472-8fff-04d3fa5e3bf0-kube-api-access-698bn\") pod \"root-account-create-update-44ggf\" (UID: \"ef954606-bfec-4472-8fff-04d3fa5e3bf0\") " pod="openstack/root-account-create-update-44ggf" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.244781 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef954606-bfec-4472-8fff-04d3fa5e3bf0-operator-scripts\") pod \"root-account-create-update-44ggf\" (UID: \"ef954606-bfec-4472-8fff-04d3fa5e3bf0\") " pod="openstack/root-account-create-update-44ggf" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.244875 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:48:58 crc kubenswrapper[4767]: E0128 18:48:58.245101 4767 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 18:48:58 crc kubenswrapper[4767]: E0128 18:48:58.245118 4767 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 18:48:58 crc kubenswrapper[4767]: E0128 18:48:58.245172 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift 
Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.272369 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-p8z4r"]
Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.274271 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-p8z4r"
Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.277722 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.278433 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.282737 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.287292 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-p8z4r"]
Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.346620 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-698bn\" (UniqueName: \"kubernetes.io/projected/ef954606-bfec-4472-8fff-04d3fa5e3bf0-kube-api-access-698bn\") pod \"root-account-create-update-44ggf\" (UID: \"ef954606-bfec-4472-8fff-04d3fa5e3bf0\") " pod="openstack/root-account-create-update-44ggf"
Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.346698 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef954606-bfec-4472-8fff-04d3fa5e3bf0-operator-scripts\") pod \"root-account-create-update-44ggf\" (UID: \"ef954606-bfec-4472-8fff-04d3fa5e3bf0\") " pod="openstack/root-account-create-update-44ggf"
Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.347778 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef954606-bfec-4472-8fff-04d3fa5e3bf0-operator-scripts\") pod \"root-account-create-update-44ggf\" (UID: \"ef954606-bfec-4472-8fff-04d3fa5e3bf0\") " pod="openstack/root-account-create-update-44ggf"
Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.378044 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-698bn\" (UniqueName: \"kubernetes.io/projected/ef954606-bfec-4472-8fff-04d3fa5e3bf0-kube-api-access-698bn\") pod \"root-account-create-update-44ggf\" (UID: \"ef954606-bfec-4472-8fff-04d3fa5e3bf0\") " pod="openstack/root-account-create-update-44ggf"
Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.448996 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-swiftconf\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r"
Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.449069 4767 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-dispersionconf\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.449120 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0bc86a10-2d77-4909-aea5-23bb07841492-scripts\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.449226 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2sfvw\" (UniqueName: \"kubernetes.io/projected/0bc86a10-2d77-4909-aea5-23bb07841492-kube-api-access-2sfvw\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.449260 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0bc86a10-2d77-4909-aea5-23bb07841492-ring-data-devices\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.449312 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0bc86a10-2d77-4909-aea5-23bb07841492-etc-swift\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.449353 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-combined-ca-bundle\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.504892 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-44ggf" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.552242 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2sfvw\" (UniqueName: \"kubernetes.io/projected/0bc86a10-2d77-4909-aea5-23bb07841492-kube-api-access-2sfvw\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.552329 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0bc86a10-2d77-4909-aea5-23bb07841492-ring-data-devices\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.552411 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0bc86a10-2d77-4909-aea5-23bb07841492-etc-swift\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.552457 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-combined-ca-bundle\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.552494 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-swiftconf\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.552522 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-dispersionconf\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.552783 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0bc86a10-2d77-4909-aea5-23bb07841492-scripts\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.553294 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0bc86a10-2d77-4909-aea5-23bb07841492-etc-swift\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.553768 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0bc86a10-2d77-4909-aea5-23bb07841492-ring-data-devices\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 
18:48:58.555418 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0bc86a10-2d77-4909-aea5-23bb07841492-scripts\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.560342 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-swiftconf\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.562906 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-dispersionconf\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.563840 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-combined-ca-bundle\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.577577 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2sfvw\" (UniqueName: \"kubernetes.io/projected/0bc86a10-2d77-4909-aea5-23bb07841492-kube-api-access-2sfvw\") pod \"swift-ring-rebalance-p8z4r\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.597080 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-dt6w2" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.604225 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.619569 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1668-account-create-update-7rp66" event={"ID":"c29bdd1f-3c65-4fbc-9365-b7f446afc233","Type":"ContainerStarted","Data":"4bdf27cd4b3bf526be591961377cd271a0d79b960e1c8c515177ad61fd282029"} Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.649498 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-1668-account-create-update-7rp66" podStartSLOduration=2.64946788 podStartE2EDuration="2.64946788s" podCreationTimestamp="2026-01-28 18:48:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:48:58.645688191 +0000 UTC m=+1144.609871085" watchObservedRunningTime="2026-01-28 18:48:58.64946788 +0000 UTC m=+1144.613650754" Jan 28 18:48:58 crc kubenswrapper[4767]: I0128 18:48:58.816048 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a796f512-5b4c-4ba3-8e10-97e19818478f" path="/var/lib/kubelet/pods/a796f512-5b4c-4ba3-8e10-97e19818478f/volumes" Jan 28 18:48:59 crc kubenswrapper[4767]: I0128 18:48:59.016524 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-44ggf"] Jan 28 18:48:59 crc kubenswrapper[4767]: W0128 18:48:59.158466 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0bc86a10_2d77_4909_aea5_23bb07841492.slice/crio-7b43b2fb9b1e7bd50c92c7d1f3bb2ad51b5832b4c75e30d7eb1d0599ca7cc135 WatchSource:0}: Error finding container 7b43b2fb9b1e7bd50c92c7d1f3bb2ad51b5832b4c75e30d7eb1d0599ca7cc135: Status 404 returned error can't find the container with id 7b43b2fb9b1e7bd50c92c7d1f3bb2ad51b5832b4c75e30d7eb1d0599ca7cc135 Jan 28 18:48:59 crc kubenswrapper[4767]: I0128 18:48:59.159758 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-p8z4r"] Jan 28 18:48:59 crc kubenswrapper[4767]: I0128 18:48:59.634422 4767 generic.go:334] "Generic (PLEG): container finished" podID="fe833b02-be1c-45d7-88b0-8c18d9471631" containerID="2156592dafc87c9c01fb56a217996f56bdd0917a97377e6ad61bbc59b08f1669" exitCode=0 Jan 28 18:48:59 crc kubenswrapper[4767]: I0128 18:48:59.634506 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-dhkzb" event={"ID":"fe833b02-be1c-45d7-88b0-8c18d9471631","Type":"ContainerDied","Data":"2156592dafc87c9c01fb56a217996f56bdd0917a97377e6ad61bbc59b08f1669"} Jan 28 18:48:59 crc kubenswrapper[4767]: I0128 18:48:59.637914 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-p8z4r" event={"ID":"0bc86a10-2d77-4909-aea5-23bb07841492","Type":"ContainerStarted","Data":"7b43b2fb9b1e7bd50c92c7d1f3bb2ad51b5832b4c75e30d7eb1d0599ca7cc135"} Jan 28 18:48:59 crc kubenswrapper[4767]: I0128 18:48:59.640224 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-44ggf" event={"ID":"ef954606-bfec-4472-8fff-04d3fa5e3bf0","Type":"ContainerStarted","Data":"1314d240d923447f7d45dea9cc06c7db6dd1892b6475cfc81ebaa81b2f830f78"} Jan 28 18:48:59 crc kubenswrapper[4767]: I0128 18:48:59.640287 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-44ggf" 
event={"ID":"ef954606-bfec-4472-8fff-04d3fa5e3bf0","Type":"ContainerStarted","Data":"6aed3ed3a542e1329500da3cb42ba4618598d95253912a2a2846233e436533e5"} Jan 28 18:48:59 crc kubenswrapper[4767]: I0128 18:48:59.679231 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-44ggf" podStartSLOduration=1.679189767 podStartE2EDuration="1.679189767s" podCreationTimestamp="2026-01-28 18:48:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:48:59.678235727 +0000 UTC m=+1145.642418601" watchObservedRunningTime="2026-01-28 18:48:59.679189767 +0000 UTC m=+1145.643372641" Jan 28 18:49:00 crc kubenswrapper[4767]: I0128 18:49:00.673302 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-2rnmq"] Jan 28 18:49:00 crc kubenswrapper[4767]: I0128 18:49:00.675221 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-2rnmq" Jan 28 18:49:00 crc kubenswrapper[4767]: I0128 18:49:00.679259 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-2rnmq"] Jan 28 18:49:00 crc kubenswrapper[4767]: I0128 18:49:00.808918 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ce20579-faaa-46f4-8d21-ae1fa142da52-operator-scripts\") pod \"keystone-db-create-2rnmq\" (UID: \"8ce20579-faaa-46f4-8d21-ae1fa142da52\") " pod="openstack/keystone-db-create-2rnmq" Jan 28 18:49:00 crc kubenswrapper[4767]: I0128 18:49:00.809010 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9qm7\" (UniqueName: \"kubernetes.io/projected/8ce20579-faaa-46f4-8d21-ae1fa142da52-kube-api-access-n9qm7\") pod \"keystone-db-create-2rnmq\" (UID: \"8ce20579-faaa-46f4-8d21-ae1fa142da52\") " pod="openstack/keystone-db-create-2rnmq" Jan 28 18:49:00 crc kubenswrapper[4767]: I0128 18:49:00.910977 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9qm7\" (UniqueName: \"kubernetes.io/projected/8ce20579-faaa-46f4-8d21-ae1fa142da52-kube-api-access-n9qm7\") pod \"keystone-db-create-2rnmq\" (UID: \"8ce20579-faaa-46f4-8d21-ae1fa142da52\") " pod="openstack/keystone-db-create-2rnmq" Jan 28 18:49:00 crc kubenswrapper[4767]: I0128 18:49:00.911195 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ce20579-faaa-46f4-8d21-ae1fa142da52-operator-scripts\") pod \"keystone-db-create-2rnmq\" (UID: \"8ce20579-faaa-46f4-8d21-ae1fa142da52\") " pod="openstack/keystone-db-create-2rnmq" Jan 28 18:49:00 crc kubenswrapper[4767]: I0128 18:49:00.912289 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ce20579-faaa-46f4-8d21-ae1fa142da52-operator-scripts\") pod \"keystone-db-create-2rnmq\" (UID: \"8ce20579-faaa-46f4-8d21-ae1fa142da52\") " pod="openstack/keystone-db-create-2rnmq" Jan 28 18:49:00 crc kubenswrapper[4767]: I0128 18:49:00.934918 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9qm7\" (UniqueName: \"kubernetes.io/projected/8ce20579-faaa-46f4-8d21-ae1fa142da52-kube-api-access-n9qm7\") pod \"keystone-db-create-2rnmq\" (UID: \"8ce20579-faaa-46f4-8d21-ae1fa142da52\") " 
pod="openstack/keystone-db-create-2rnmq" Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.004078 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-2rnmq" Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.164498 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-dhkzb" Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.320962 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe833b02-be1c-45d7-88b0-8c18d9471631-operator-scripts\") pod \"fe833b02-be1c-45d7-88b0-8c18d9471631\" (UID: \"fe833b02-be1c-45d7-88b0-8c18d9471631\") " Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.321036 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwtvc\" (UniqueName: \"kubernetes.io/projected/fe833b02-be1c-45d7-88b0-8c18d9471631-kube-api-access-wwtvc\") pod \"fe833b02-be1c-45d7-88b0-8c18d9471631\" (UID: \"fe833b02-be1c-45d7-88b0-8c18d9471631\") " Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.322446 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe833b02-be1c-45d7-88b0-8c18d9471631-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fe833b02-be1c-45d7-88b0-8c18d9471631" (UID: "fe833b02-be1c-45d7-88b0-8c18d9471631"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.327697 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe833b02-be1c-45d7-88b0-8c18d9471631-kube-api-access-wwtvc" (OuterVolumeSpecName: "kube-api-access-wwtvc") pod "fe833b02-be1c-45d7-88b0-8c18d9471631" (UID: "fe833b02-be1c-45d7-88b0-8c18d9471631"). InnerVolumeSpecName "kube-api-access-wwtvc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.423063 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwtvc\" (UniqueName: \"kubernetes.io/projected/fe833b02-be1c-45d7-88b0-8c18d9471631-kube-api-access-wwtvc\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.423104 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fe833b02-be1c-45d7-88b0-8c18d9471631-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.500944 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-2rnmq"] Jan 28 18:49:01 crc kubenswrapper[4767]: W0128 18:49:01.506224 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ce20579_faaa_46f4_8d21_ae1fa142da52.slice/crio-792d0bbd92aea009fa90032643672ccfd83d9d4830179f11244b6115554d212b WatchSource:0}: Error finding container 792d0bbd92aea009fa90032643672ccfd83d9d4830179f11244b6115554d212b: Status 404 returned error can't find the container with id 792d0bbd92aea009fa90032643672ccfd83d9d4830179f11244b6115554d212b Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.689724 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-dhkzb" event={"ID":"fe833b02-be1c-45d7-88b0-8c18d9471631","Type":"ContainerDied","Data":"0c99dbbbd219bd1347bba242fd93f9972a63a0b3321bf57f67f5c5e0becc0449"} Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.689995 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c99dbbbd219bd1347bba242fd93f9972a63a0b3321bf57f67f5c5e0becc0449" Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.690069 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-dhkzb" Jan 28 18:49:01 crc kubenswrapper[4767]: I0128 18:49:01.694128 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-2rnmq" event={"ID":"8ce20579-faaa-46f4-8d21-ae1fa142da52","Type":"ContainerStarted","Data":"792d0bbd92aea009fa90032643672ccfd83d9d4830179f11244b6115554d212b"} Jan 28 18:49:02 crc kubenswrapper[4767]: I0128 18:49:02.341042 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:49:02 crc kubenswrapper[4767]: E0128 18:49:02.341367 4767 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 18:49:02 crc kubenswrapper[4767]: E0128 18:49:02.341396 4767 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 18:49:02 crc kubenswrapper[4767]: E0128 18:49:02.341486 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift podName:c482494c-49e9-4314-a836-a7bea8f6f8c4 nodeName:}" failed. No retries permitted until 2026-01-28 18:49:10.341458317 +0000 UTC m=+1156.305641191 (durationBeforeRetry 8s). 
Jan 28 18:49:02 crc kubenswrapper[4767]: I0128 18:49:02.707498 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-2rnmq" event={"ID":"8ce20579-faaa-46f4-8d21-ae1fa142da52","Type":"ContainerStarted","Data":"c873fb105016dbbff060df0427f33fe28c908d5832146799efff79a9a1ac534c"}
Jan 28 18:49:02 crc kubenswrapper[4767]: I0128 18:49:02.710338 4767 generic.go:334] "Generic (PLEG): container finished" podID="ef954606-bfec-4472-8fff-04d3fa5e3bf0" containerID="1314d240d923447f7d45dea9cc06c7db6dd1892b6475cfc81ebaa81b2f830f78" exitCode=0
Jan 28 18:49:02 crc kubenswrapper[4767]: I0128 18:49:02.710398 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-44ggf" event={"ID":"ef954606-bfec-4472-8fff-04d3fa5e3bf0","Type":"ContainerDied","Data":"1314d240d923447f7d45dea9cc06c7db6dd1892b6475cfc81ebaa81b2f830f78"}
Jan 28 18:49:02 crc kubenswrapper[4767]: I0128 18:49:02.712402 4767 generic.go:334] "Generic (PLEG): container finished" podID="c29bdd1f-3c65-4fbc-9365-b7f446afc233" containerID="4bdf27cd4b3bf526be591961377cd271a0d79b960e1c8c515177ad61fd282029" exitCode=0
Jan 28 18:49:02 crc kubenswrapper[4767]: I0128 18:49:02.712453 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1668-account-create-update-7rp66" event={"ID":"c29bdd1f-3c65-4fbc-9365-b7f446afc233","Type":"ContainerDied","Data":"4bdf27cd4b3bf526be591961377cd271a0d79b960e1c8c515177ad61fd282029"}
Jan 28 18:49:02 crc kubenswrapper[4767]: I0128 18:49:02.729445 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-2rnmq" podStartSLOduration=2.729429153 podStartE2EDuration="2.729429153s" podCreationTimestamp="2026-01-28 18:49:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:49:02.728478514 +0000 UTC m=+1148.692661398" watchObservedRunningTime="2026-01-28 18:49:02.729429153 +0000 UTC m=+1148.693612027"
Jan 28 18:49:03 crc kubenswrapper[4767]: I0128 18:49:03.597473 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc"
Jan 28 18:49:03 crc kubenswrapper[4767]: I0128 18:49:03.661259 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pbstb"]
Jan 28 18:49:03 crc kubenswrapper[4767]: I0128 18:49:03.662021 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8554648995-pbstb" podUID="38cbf822-4dba-4db2-843a-05cc8133fe50" containerName="dnsmasq-dns" containerID="cri-o://daa596200cd79711632fe9fa67c38b6ee3003b9229ff033adb4cc7d03e97e489" gracePeriod=10
Jan 28 18:49:03 crc kubenswrapper[4767]: I0128 18:49:03.721587 4767 generic.go:334] "Generic (PLEG): container finished" podID="8ce20579-faaa-46f4-8d21-ae1fa142da52" containerID="c873fb105016dbbff060df0427f33fe28c908d5832146799efff79a9a1ac534c" exitCode=0
Jan 28 18:49:03 crc kubenswrapper[4767]: I0128 18:49:03.721706 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-2rnmq" event={"ID":"8ce20579-faaa-46f4-8d21-ae1fa142da52","Type":"ContainerDied","Data":"c873fb105016dbbff060df0427f33fe28c908d5832146799efff79a9a1ac534c"} Jan 28 
18:49:04 crc kubenswrapper[4767]: I0128 18:49:04.510609 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-44ggf" Jan 28 18:49:04 crc kubenswrapper[4767]: I0128 18:49:04.688839 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef954606-bfec-4472-8fff-04d3fa5e3bf0-operator-scripts\") pod \"ef954606-bfec-4472-8fff-04d3fa5e3bf0\" (UID: \"ef954606-bfec-4472-8fff-04d3fa5e3bf0\") " Jan 28 18:49:04 crc kubenswrapper[4767]: I0128 18:49:04.688906 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-698bn\" (UniqueName: \"kubernetes.io/projected/ef954606-bfec-4472-8fff-04d3fa5e3bf0-kube-api-access-698bn\") pod \"ef954606-bfec-4472-8fff-04d3fa5e3bf0\" (UID: \"ef954606-bfec-4472-8fff-04d3fa5e3bf0\") " Jan 28 18:49:04 crc kubenswrapper[4767]: I0128 18:49:04.689637 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ef954606-bfec-4472-8fff-04d3fa5e3bf0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ef954606-bfec-4472-8fff-04d3fa5e3bf0" (UID: "ef954606-bfec-4472-8fff-04d3fa5e3bf0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:04 crc kubenswrapper[4767]: I0128 18:49:04.698429 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ef954606-bfec-4472-8fff-04d3fa5e3bf0-kube-api-access-698bn" (OuterVolumeSpecName: "kube-api-access-698bn") pod "ef954606-bfec-4472-8fff-04d3fa5e3bf0" (UID: "ef954606-bfec-4472-8fff-04d3fa5e3bf0"). InnerVolumeSpecName "kube-api-access-698bn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:04 crc kubenswrapper[4767]: I0128 18:49:04.734320 4767 generic.go:334] "Generic (PLEG): container finished" podID="38cbf822-4dba-4db2-843a-05cc8133fe50" containerID="daa596200cd79711632fe9fa67c38b6ee3003b9229ff033adb4cc7d03e97e489" exitCode=0 Jan 28 18:49:04 crc kubenswrapper[4767]: I0128 18:49:04.734385 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pbstb" event={"ID":"38cbf822-4dba-4db2-843a-05cc8133fe50","Type":"ContainerDied","Data":"daa596200cd79711632fe9fa67c38b6ee3003b9229ff033adb4cc7d03e97e489"} Jan 28 18:49:04 crc kubenswrapper[4767]: I0128 18:49:04.743497 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-44ggf" event={"ID":"ef954606-bfec-4472-8fff-04d3fa5e3bf0","Type":"ContainerDied","Data":"6aed3ed3a542e1329500da3cb42ba4618598d95253912a2a2846233e436533e5"} Jan 28 18:49:04 crc kubenswrapper[4767]: I0128 18:49:04.743574 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6aed3ed3a542e1329500da3cb42ba4618598d95253912a2a2846233e436533e5" Jan 28 18:49:04 crc kubenswrapper[4767]: I0128 18:49:04.743533 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-44ggf" Jan 28 18:49:04 crc kubenswrapper[4767]: I0128 18:49:04.790695 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ef954606-bfec-4472-8fff-04d3fa5e3bf0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:04 crc kubenswrapper[4767]: I0128 18:49:04.790727 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-698bn\" (UniqueName: \"kubernetes.io/projected/ef954606-bfec-4472-8fff-04d3fa5e3bf0-kube-api-access-698bn\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.232127 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-1668-account-create-update-7rp66" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.245927 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.277085 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-2rnmq" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.386167 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-pbstb" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.426441 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9fjs\" (UniqueName: \"kubernetes.io/projected/c29bdd1f-3c65-4fbc-9365-b7f446afc233-kube-api-access-c9fjs\") pod \"c29bdd1f-3c65-4fbc-9365-b7f446afc233\" (UID: \"c29bdd1f-3c65-4fbc-9365-b7f446afc233\") " Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.426518 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c29bdd1f-3c65-4fbc-9365-b7f446afc233-operator-scripts\") pod \"c29bdd1f-3c65-4fbc-9365-b7f446afc233\" (UID: \"c29bdd1f-3c65-4fbc-9365-b7f446afc233\") " Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.427393 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c29bdd1f-3c65-4fbc-9365-b7f446afc233-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c29bdd1f-3c65-4fbc-9365-b7f446afc233" (UID: "c29bdd1f-3c65-4fbc-9365-b7f446afc233"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.427567 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ce20579-faaa-46f4-8d21-ae1fa142da52-operator-scripts\") pod \"8ce20579-faaa-46f4-8d21-ae1fa142da52\" (UID: \"8ce20579-faaa-46f4-8d21-ae1fa142da52\") " Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.428072 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ce20579-faaa-46f4-8d21-ae1fa142da52-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8ce20579-faaa-46f4-8d21-ae1fa142da52" (UID: "8ce20579-faaa-46f4-8d21-ae1fa142da52"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.428190 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9qm7\" (UniqueName: \"kubernetes.io/projected/8ce20579-faaa-46f4-8d21-ae1fa142da52-kube-api-access-n9qm7\") pod \"8ce20579-faaa-46f4-8d21-ae1fa142da52\" (UID: \"8ce20579-faaa-46f4-8d21-ae1fa142da52\") " Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.428714 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ce20579-faaa-46f4-8d21-ae1fa142da52-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.428738 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c29bdd1f-3c65-4fbc-9365-b7f446afc233-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.432416 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c29bdd1f-3c65-4fbc-9365-b7f446afc233-kube-api-access-c9fjs" (OuterVolumeSpecName: "kube-api-access-c9fjs") pod "c29bdd1f-3c65-4fbc-9365-b7f446afc233" (UID: "c29bdd1f-3c65-4fbc-9365-b7f446afc233"). InnerVolumeSpecName "kube-api-access-c9fjs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.432884 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ce20579-faaa-46f4-8d21-ae1fa142da52-kube-api-access-n9qm7" (OuterVolumeSpecName: "kube-api-access-n9qm7") pod "8ce20579-faaa-46f4-8d21-ae1fa142da52" (UID: "8ce20579-faaa-46f4-8d21-ae1fa142da52"). InnerVolumeSpecName "kube-api-access-n9qm7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.529530 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-dns-svc\") pod \"38cbf822-4dba-4db2-843a-05cc8133fe50\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.529639 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-config\") pod \"38cbf822-4dba-4db2-843a-05cc8133fe50\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.529786 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8ml9\" (UniqueName: \"kubernetes.io/projected/38cbf822-4dba-4db2-843a-05cc8133fe50-kube-api-access-g8ml9\") pod \"38cbf822-4dba-4db2-843a-05cc8133fe50\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.529912 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-ovsdbserver-nb\") pod \"38cbf822-4dba-4db2-843a-05cc8133fe50\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.529958 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-ovsdbserver-sb\") pod \"38cbf822-4dba-4db2-843a-05cc8133fe50\" (UID: \"38cbf822-4dba-4db2-843a-05cc8133fe50\") " Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.530446 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9fjs\" (UniqueName: \"kubernetes.io/projected/c29bdd1f-3c65-4fbc-9365-b7f446afc233-kube-api-access-c9fjs\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.530480 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9qm7\" (UniqueName: \"kubernetes.io/projected/8ce20579-faaa-46f4-8d21-ae1fa142da52-kube-api-access-n9qm7\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.537578 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38cbf822-4dba-4db2-843a-05cc8133fe50-kube-api-access-g8ml9" (OuterVolumeSpecName: "kube-api-access-g8ml9") pod "38cbf822-4dba-4db2-843a-05cc8133fe50" (UID: "38cbf822-4dba-4db2-843a-05cc8133fe50"). InnerVolumeSpecName "kube-api-access-g8ml9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.575458 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "38cbf822-4dba-4db2-843a-05cc8133fe50" (UID: "38cbf822-4dba-4db2-843a-05cc8133fe50"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.577457 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "38cbf822-4dba-4db2-843a-05cc8133fe50" (UID: "38cbf822-4dba-4db2-843a-05cc8133fe50"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.581450 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-config" (OuterVolumeSpecName: "config") pod "38cbf822-4dba-4db2-843a-05cc8133fe50" (UID: "38cbf822-4dba-4db2-843a-05cc8133fe50"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.581507 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "38cbf822-4dba-4db2-843a-05cc8133fe50" (UID: "38cbf822-4dba-4db2-843a-05cc8133fe50"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.631707 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.631742 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.631755 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.631764 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38cbf822-4dba-4db2-843a-05cc8133fe50-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.631772 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8ml9\" (UniqueName: \"kubernetes.io/projected/38cbf822-4dba-4db2-843a-05cc8133fe50-kube-api-access-g8ml9\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.774457 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-1668-account-create-update-7rp66" event={"ID":"c29bdd1f-3c65-4fbc-9365-b7f446afc233","Type":"ContainerDied","Data":"27264d64b7d8faf1d6223137aea0f82d6e0fdd79778a5320481dfdd4d59582fc"} Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.774518 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27264d64b7d8faf1d6223137aea0f82d6e0fdd79778a5320481dfdd4d59582fc" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.774592 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-1668-account-create-update-7rp66" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.781565 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8554648995-pbstb" event={"ID":"38cbf822-4dba-4db2-843a-05cc8133fe50","Type":"ContainerDied","Data":"b7db94327a2e3a6dbdaed33db1abd2ca285e9d33450b194e8cc383814b1d1bae"} Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.781635 4767 scope.go:117] "RemoveContainer" containerID="daa596200cd79711632fe9fa67c38b6ee3003b9229ff033adb4cc7d03e97e489" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.781870 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8554648995-pbstb" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.789133 4767 generic.go:334] "Generic (PLEG): container finished" podID="dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" containerID="3d481c8dcbb169378cc6a6e69a7028df854ea521a63e90c5e4e9eea8fbf1f230" exitCode=0 Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.789247 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd","Type":"ContainerDied","Data":"3d481c8dcbb169378cc6a6e69a7028df854ea521a63e90c5e4e9eea8fbf1f230"} Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.793098 4767 generic.go:334] "Generic (PLEG): container finished" podID="bbea8b85-5bb2-4570-83e7-07dafaade001" containerID="d61967d9f7f7e59ac33e6585b330ce622d177128d25eda2f7e2b479e34bbd907" exitCode=0 Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.793167 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"bbea8b85-5bb2-4570-83e7-07dafaade001","Type":"ContainerDied","Data":"d61967d9f7f7e59ac33e6585b330ce622d177128d25eda2f7e2b479e34bbd907"} Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.809401 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-2rnmq" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.814827 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-p8z4r" event={"ID":"0bc86a10-2d77-4909-aea5-23bb07841492","Type":"ContainerStarted","Data":"56859b981f16bce1bce3de22767122a976ca0e58026a6085bb6c77d92c36c9b5"} Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.814877 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-2rnmq" event={"ID":"8ce20579-faaa-46f4-8d21-ae1fa142da52","Type":"ContainerDied","Data":"792d0bbd92aea009fa90032643672ccfd83d9d4830179f11244b6115554d212b"} Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.814892 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="792d0bbd92aea009fa90032643672ccfd83d9d4830179f11244b6115554d212b" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.833893 4767 scope.go:117] "RemoveContainer" containerID="a2139472d0e8fa7714931b5fac5bf82f1ac557d3d0db03aaf692847b4ebf08ef" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.855750 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-p8z4r" podStartSLOduration=1.916023663 podStartE2EDuration="8.855716675s" podCreationTimestamp="2026-01-28 18:48:58 +0000 UTC" firstStartedPulling="2026-01-28 18:48:59.164175973 +0000 UTC m=+1145.128358847" lastFinishedPulling="2026-01-28 18:49:06.103868985 +0000 UTC m=+1152.068051859" observedRunningTime="2026-01-28 18:49:06.852731281 +0000 UTC m=+1152.816914175" watchObservedRunningTime="2026-01-28 18:49:06.855716675 +0000 UTC m=+1152.819899549" Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.906092 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pbstb"] Jan 28 18:49:06 crc kubenswrapper[4767]: I0128 18:49:06.915633 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8554648995-pbstb"] Jan 28 18:49:07 crc kubenswrapper[4767]: I0128 18:49:07.828404 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd","Type":"ContainerStarted","Data":"0b430da8b0b7f87d140a51b6b6f8674570a1d1fc69beb8d27fe5333716e57cc7"} Jan 28 18:49:07 crc kubenswrapper[4767]: I0128 18:49:07.846260 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"bbea8b85-5bb2-4570-83e7-07dafaade001","Type":"ContainerStarted","Data":"1dbdc25ff42287511a85e0c77517dcd153c4a98843b04855b2fd4416b5b4eeb7"} Jan 28 18:49:08 crc kubenswrapper[4767]: I0128 18:49:08.812746 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38cbf822-4dba-4db2-843a-05cc8133fe50" path="/var/lib/kubelet/pods/38cbf822-4dba-4db2-843a-05cc8133fe50/volumes" Jan 28 18:49:10 crc kubenswrapper[4767]: I0128 18:49:10.426568 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:49:10 crc kubenswrapper[4767]: E0128 18:49:10.426810 4767 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 28 18:49:10 crc kubenswrapper[4767]: E0128 18:49:10.426841 4767 projected.go:194] Error preparing data for projected 
volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 28 18:49:10 crc kubenswrapper[4767]: E0128 18:49:10.426908 4767 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift podName:c482494c-49e9-4314-a836-a7bea8f6f8c4 nodeName:}" failed. No retries permitted until 2026-01-28 18:49:26.426887509 +0000 UTC m=+1172.391070383 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift") pod "swift-storage-0" (UID: "c482494c-49e9-4314-a836-a7bea8f6f8c4") : configmap "swift-ring-files" not found Jan 28 18:49:10 crc kubenswrapper[4767]: I0128 18:49:10.867406 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 28 18:49:10 crc kubenswrapper[4767]: I0128 18:49:10.897884 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=40.912006064 podStartE2EDuration="1m4.897855717s" podCreationTimestamp="2026-01-28 18:48:06 +0000 UTC" firstStartedPulling="2026-01-28 18:48:09.349572428 +0000 UTC m=+1095.313755302" lastFinishedPulling="2026-01-28 18:48:33.335422081 +0000 UTC m=+1119.299604955" observedRunningTime="2026-01-28 18:49:10.892598783 +0000 UTC m=+1156.856781667" watchObservedRunningTime="2026-01-28 18:49:10.897855717 +0000 UTC m=+1156.862038591" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.197268 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8554648995-pbstb" podUID="38cbf822-4dba-4db2-843a-05cc8133fe50" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.110:5353: i/o timeout" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.697779 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-dq7h8" podUID="79640773-c4bb-4add-83a8-f9a39873bdef" containerName="ovn-controller" probeResult="failure" output=< Jan 28 18:49:11 crc kubenswrapper[4767]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 28 18:49:11 crc kubenswrapper[4767]: > Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.751757 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.797689 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-q46gj"] Jan 28 18:49:11 crc kubenswrapper[4767]: E0128 18:49:11.798267 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe833b02-be1c-45d7-88b0-8c18d9471631" containerName="mariadb-database-create" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.798290 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe833b02-be1c-45d7-88b0-8c18d9471631" containerName="mariadb-database-create" Jan 28 18:49:11 crc kubenswrapper[4767]: E0128 18:49:11.798310 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ef954606-bfec-4472-8fff-04d3fa5e3bf0" containerName="mariadb-account-create-update" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.798319 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="ef954606-bfec-4472-8fff-04d3fa5e3bf0" containerName="mariadb-account-create-update" Jan 28 18:49:11 crc kubenswrapper[4767]: E0128 18:49:11.798348 4767 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="8ce20579-faaa-46f4-8d21-ae1fa142da52" containerName="mariadb-database-create" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.798357 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ce20579-faaa-46f4-8d21-ae1fa142da52" containerName="mariadb-database-create" Jan 28 18:49:11 crc kubenswrapper[4767]: E0128 18:49:11.798374 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38cbf822-4dba-4db2-843a-05cc8133fe50" containerName="dnsmasq-dns" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.798382 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="38cbf822-4dba-4db2-843a-05cc8133fe50" containerName="dnsmasq-dns" Jan 28 18:49:11 crc kubenswrapper[4767]: E0128 18:49:11.798400 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c29bdd1f-3c65-4fbc-9365-b7f446afc233" containerName="mariadb-account-create-update" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.798406 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c29bdd1f-3c65-4fbc-9365-b7f446afc233" containerName="mariadb-account-create-update" Jan 28 18:49:11 crc kubenswrapper[4767]: E0128 18:49:11.798422 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38cbf822-4dba-4db2-843a-05cc8133fe50" containerName="init" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.798430 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="38cbf822-4dba-4db2-843a-05cc8133fe50" containerName="init" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.798650 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ce20579-faaa-46f4-8d21-ae1fa142da52" containerName="mariadb-database-create" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.798670 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="c29bdd1f-3c65-4fbc-9365-b7f446afc233" containerName="mariadb-account-create-update" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.798680 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="ef954606-bfec-4472-8fff-04d3fa5e3bf0" containerName="mariadb-account-create-update" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.798692 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="38cbf822-4dba-4db2-843a-05cc8133fe50" containerName="dnsmasq-dns" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.798706 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe833b02-be1c-45d7-88b0-8c18d9471631" containerName="mariadb-database-create" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.799517 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.809796 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.816631 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-mvkq4" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.831459 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-q46gj"] Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.880391 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.910540 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=42.654336998 podStartE2EDuration="1m4.910524892s" podCreationTimestamp="2026-01-28 18:48:07 +0000 UTC" firstStartedPulling="2026-01-28 18:48:09.224455403 +0000 UTC m=+1095.188638277" lastFinishedPulling="2026-01-28 18:48:31.480643297 +0000 UTC m=+1117.444826171" observedRunningTime="2026-01-28 18:49:11.909983915 +0000 UTC m=+1157.874166789" watchObservedRunningTime="2026-01-28 18:49:11.910524892 +0000 UTC m=+1157.874707766" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.956058 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-combined-ca-bundle\") pod \"glance-db-sync-q46gj\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") " pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.956133 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-db-sync-config-data\") pod \"glance-db-sync-q46gj\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") " pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.956325 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-config-data\") pod \"glance-db-sync-q46gj\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") " pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:11 crc kubenswrapper[4767]: I0128 18:49:11.956372 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pnbfn\" (UniqueName: \"kubernetes.io/projected/b867495c-e01f-46a2-aa93-e42cd53d1b64-kube-api-access-pnbfn\") pod \"glance-db-sync-q46gj\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") " pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:12 crc kubenswrapper[4767]: I0128 18:49:12.057448 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-combined-ca-bundle\") pod \"glance-db-sync-q46gj\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") " pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:12 crc kubenswrapper[4767]: I0128 18:49:12.057608 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-db-sync-config-data\") pod \"glance-db-sync-q46gj\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") " pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:12 crc kubenswrapper[4767]: I0128 18:49:12.057716 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-config-data\") pod \"glance-db-sync-q46gj\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") " pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:12 crc kubenswrapper[4767]: I0128 18:49:12.057747 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pnbfn\" (UniqueName: \"kubernetes.io/projected/b867495c-e01f-46a2-aa93-e42cd53d1b64-kube-api-access-pnbfn\") pod \"glance-db-sync-q46gj\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") " pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:12 crc kubenswrapper[4767]: I0128 18:49:12.067370 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-db-sync-config-data\") pod \"glance-db-sync-q46gj\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") " pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:12 crc kubenswrapper[4767]: I0128 18:49:12.067506 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-combined-ca-bundle\") pod \"glance-db-sync-q46gj\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") " pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:12 crc kubenswrapper[4767]: I0128 18:49:12.068819 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-config-data\") pod \"glance-db-sync-q46gj\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") " pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:12 crc kubenswrapper[4767]: I0128 18:49:12.095927 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pnbfn\" (UniqueName: \"kubernetes.io/projected/b867495c-e01f-46a2-aa93-e42cd53d1b64-kube-api-access-pnbfn\") pod \"glance-db-sync-q46gj\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") " pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:12 crc kubenswrapper[4767]: I0128 18:49:12.137363 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-q46gj" Jan 28 18:49:12 crc kubenswrapper[4767]: I0128 18:49:12.770178 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-q46gj"] Jan 28 18:49:12 crc kubenswrapper[4767]: W0128 18:49:12.770188 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb867495c_e01f_46a2_aa93_e42cd53d1b64.slice/crio-7b893dc97f2dfb8acff342e64aea2c92a96882afe8b306260d5eca86f8200be7 WatchSource:0}: Error finding container 7b893dc97f2dfb8acff342e64aea2c92a96882afe8b306260d5eca86f8200be7: Status 404 returned error can't find the container with id 7b893dc97f2dfb8acff342e64aea2c92a96882afe8b306260d5eca86f8200be7 Jan 28 18:49:12 crc kubenswrapper[4767]: I0128 18:49:12.893503 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-q46gj" event={"ID":"b867495c-e01f-46a2-aa93-e42cd53d1b64","Type":"ContainerStarted","Data":"7b893dc97f2dfb8acff342e64aea2c92a96882afe8b306260d5eca86f8200be7"} Jan 28 18:49:15 crc kubenswrapper[4767]: I0128 18:49:15.455535 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:49:15 crc kubenswrapper[4767]: I0128 18:49:15.455883 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:49:15 crc kubenswrapper[4767]: I0128 18:49:15.921083 4767 generic.go:334] "Generic (PLEG): container finished" podID="0bc86a10-2d77-4909-aea5-23bb07841492" containerID="56859b981f16bce1bce3de22767122a976ca0e58026a6085bb6c77d92c36c9b5" exitCode=0 Jan 28 18:49:15 crc kubenswrapper[4767]: I0128 18:49:15.921145 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-p8z4r" event={"ID":"0bc86a10-2d77-4909-aea5-23bb07841492","Type":"ContainerDied","Data":"56859b981f16bce1bce3de22767122a976ca0e58026a6085bb6c77d92c36c9b5"} Jan 28 18:49:16 crc kubenswrapper[4767]: I0128 18:49:16.696236 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-dq7h8" podUID="79640773-c4bb-4add-83a8-f9a39873bdef" containerName="ovn-controller" probeResult="failure" output=< Jan 28 18:49:16 crc kubenswrapper[4767]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 28 18:49:16 crc kubenswrapper[4767]: > Jan 28 18:49:16 crc kubenswrapper[4767]: I0128 18:49:16.746277 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-8cjhn" Jan 28 18:49:16 crc kubenswrapper[4767]: I0128 18:49:16.989162 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-dq7h8-config-82fgt"] Jan 28 18:49:16 crc kubenswrapper[4767]: I0128 18:49:16.990275 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:16 crc kubenswrapper[4767]: I0128 18:49:16.992350 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.006815 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dq7h8-config-82fgt"] Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.166285 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-run-ovn\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.166758 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e818d0dc-14fc-4e82-a375-45871ce0dd4a-additional-scripts\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.166828 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-log-ovn\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.166858 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvzbq\" (UniqueName: \"kubernetes.io/projected/e818d0dc-14fc-4e82-a375-45871ce0dd4a-kube-api-access-zvzbq\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.166894 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-run\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.166920 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e818d0dc-14fc-4e82-a375-45871ce0dd4a-scripts\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.269518 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e818d0dc-14fc-4e82-a375-45871ce0dd4a-scripts\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.269760 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-run-ovn\") pod 
\"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.269820 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e818d0dc-14fc-4e82-a375-45871ce0dd4a-additional-scripts\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.269890 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-log-ovn\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.269935 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvzbq\" (UniqueName: \"kubernetes.io/projected/e818d0dc-14fc-4e82-a375-45871ce0dd4a-kube-api-access-zvzbq\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.269978 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-run\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.270121 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-run-ovn\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.270394 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-log-ovn\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.270824 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e818d0dc-14fc-4e82-a375-45871ce0dd4a-additional-scripts\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.270145 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-run\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.271537 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e818d0dc-14fc-4e82-a375-45871ce0dd4a-scripts\") pod 
\"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.296685 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvzbq\" (UniqueName: \"kubernetes.io/projected/e818d0dc-14fc-4e82-a375-45871ce0dd4a-kube-api-access-zvzbq\") pod \"ovn-controller-dq7h8-config-82fgt\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.314354 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.356458 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.473349 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-swiftconf\") pod \"0bc86a10-2d77-4909-aea5-23bb07841492\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.473578 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0bc86a10-2d77-4909-aea5-23bb07841492-etc-swift\") pod \"0bc86a10-2d77-4909-aea5-23bb07841492\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.473743 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0bc86a10-2d77-4909-aea5-23bb07841492-ring-data-devices\") pod \"0bc86a10-2d77-4909-aea5-23bb07841492\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.473960 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-combined-ca-bundle\") pod \"0bc86a10-2d77-4909-aea5-23bb07841492\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.474020 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0bc86a10-2d77-4909-aea5-23bb07841492-scripts\") pod \"0bc86a10-2d77-4909-aea5-23bb07841492\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.474171 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2sfvw\" (UniqueName: \"kubernetes.io/projected/0bc86a10-2d77-4909-aea5-23bb07841492-kube-api-access-2sfvw\") pod \"0bc86a10-2d77-4909-aea5-23bb07841492\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.474240 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-dispersionconf\") pod \"0bc86a10-2d77-4909-aea5-23bb07841492\" (UID: \"0bc86a10-2d77-4909-aea5-23bb07841492\") " Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.475617 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/0bc86a10-2d77-4909-aea5-23bb07841492-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "0bc86a10-2d77-4909-aea5-23bb07841492" (UID: "0bc86a10-2d77-4909-aea5-23bb07841492"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.476351 4767 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0bc86a10-2d77-4909-aea5-23bb07841492-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.476874 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bc86a10-2d77-4909-aea5-23bb07841492-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "0bc86a10-2d77-4909-aea5-23bb07841492" (UID: "0bc86a10-2d77-4909-aea5-23bb07841492"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.488604 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0bc86a10-2d77-4909-aea5-23bb07841492-kube-api-access-2sfvw" (OuterVolumeSpecName: "kube-api-access-2sfvw") pod "0bc86a10-2d77-4909-aea5-23bb07841492" (UID: "0bc86a10-2d77-4909-aea5-23bb07841492"). InnerVolumeSpecName "kube-api-access-2sfvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.489576 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "0bc86a10-2d77-4909-aea5-23bb07841492" (UID: "0bc86a10-2d77-4909-aea5-23bb07841492"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.498287 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0bc86a10-2d77-4909-aea5-23bb07841492-scripts" (OuterVolumeSpecName: "scripts") pod "0bc86a10-2d77-4909-aea5-23bb07841492" (UID: "0bc86a10-2d77-4909-aea5-23bb07841492"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.500743 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0bc86a10-2d77-4909-aea5-23bb07841492" (UID: "0bc86a10-2d77-4909-aea5-23bb07841492"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.507910 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "0bc86a10-2d77-4909-aea5-23bb07841492" (UID: "0bc86a10-2d77-4909-aea5-23bb07841492"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.579254 4767 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0bc86a10-2d77-4909-aea5-23bb07841492-ring-data-devices\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.579769 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.579782 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0bc86a10-2d77-4909-aea5-23bb07841492-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.579794 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2sfvw\" (UniqueName: \"kubernetes.io/projected/0bc86a10-2d77-4909-aea5-23bb07841492-kube-api-access-2sfvw\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.579810 4767 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-dispersionconf\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.579821 4767 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0bc86a10-2d77-4909-aea5-23bb07841492-swiftconf\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.821097 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-dq7h8-config-82fgt"] Jan 28 18:49:17 crc kubenswrapper[4767]: W0128 18:49:17.822550 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode818d0dc_14fc_4e82_a375_45871ce0dd4a.slice/crio-f2ffbc4462224dbe34b7990fe249fc67e969e319b573f94663cbc6fa33428211 WatchSource:0}: Error finding container f2ffbc4462224dbe34b7990fe249fc67e969e319b573f94663cbc6fa33428211: Status 404 returned error can't find the container with id f2ffbc4462224dbe34b7990fe249fc67e969e319b573f94663cbc6fa33428211 Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.944848 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-p8z4r" event={"ID":"0bc86a10-2d77-4909-aea5-23bb07841492","Type":"ContainerDied","Data":"7b43b2fb9b1e7bd50c92c7d1f3bb2ad51b5832b4c75e30d7eb1d0599ca7cc135"} Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.944911 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b43b2fb9b1e7bd50c92c7d1f3bb2ad51b5832b4c75e30d7eb1d0599ca7cc135" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.944864 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-p8z4r" Jan 28 18:49:17 crc kubenswrapper[4767]: I0128 18:49:17.946326 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dq7h8-config-82fgt" event={"ID":"e818d0dc-14fc-4e82-a375-45871ce0dd4a","Type":"ContainerStarted","Data":"f2ffbc4462224dbe34b7990fe249fc67e969e319b573f94663cbc6fa33428211"} Jan 28 18:49:18 crc kubenswrapper[4767]: I0128 18:49:18.321929 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="bbea8b85-5bb2-4570-83e7-07dafaade001" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Jan 28 18:49:18 crc kubenswrapper[4767]: I0128 18:49:18.461919 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused" Jan 28 18:49:18 crc kubenswrapper[4767]: I0128 18:49:18.962908 4767 generic.go:334] "Generic (PLEG): container finished" podID="e818d0dc-14fc-4e82-a375-45871ce0dd4a" containerID="08ffc0282e64807b1090c35db34c3f34f2a02e18df344f0201d7ea0034004795" exitCode=0 Jan 28 18:49:18 crc kubenswrapper[4767]: I0128 18:49:18.962960 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dq7h8-config-82fgt" event={"ID":"e818d0dc-14fc-4e82-a375-45871ce0dd4a","Type":"ContainerDied","Data":"08ffc0282e64807b1090c35db34c3f34f2a02e18df344f0201d7ea0034004795"} Jan 28 18:49:21 crc kubenswrapper[4767]: I0128 18:49:21.696670 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-dq7h8" Jan 28 18:49:26 crc kubenswrapper[4767]: I0128 18:49:26.476980 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:49:26 crc kubenswrapper[4767]: I0128 18:49:26.486561 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/c482494c-49e9-4314-a836-a7bea8f6f8c4-etc-swift\") pod \"swift-storage-0\" (UID: \"c482494c-49e9-4314-a836-a7bea8f6f8c4\") " pod="openstack/swift-storage-0" Jan 28 18:49:26 crc kubenswrapper[4767]: I0128 18:49:26.573391 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 28 18:49:27 crc kubenswrapper[4767]: E0128 18:49:27.517565 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Jan 28 18:49:27 crc kubenswrapper[4767]: E0128 18:49:27.518485 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pnbfn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-q46gj_openstack(b867495c-e01f-46a2-aa93-e42cd53d1b64): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.519590 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:27 crc kubenswrapper[4767]: E0128 18:49:27.522546 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-q46gj" podUID="b867495c-e01f-46a2-aa93-e42cd53d1b64" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.700058 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-log-ovn\") pod \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.700181 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e818d0dc-14fc-4e82-a375-45871ce0dd4a-scripts\") pod \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.700242 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-run\") pod \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.700309 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "e818d0dc-14fc-4e82-a375-45871ce0dd4a" (UID: "e818d0dc-14fc-4e82-a375-45871ce0dd4a"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.700378 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "e818d0dc-14fc-4e82-a375-45871ce0dd4a" (UID: "e818d0dc-14fc-4e82-a375-45871ce0dd4a"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.700344 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-run-ovn\") pod \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.700424 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-run" (OuterVolumeSpecName: "var-run") pod "e818d0dc-14fc-4e82-a375-45871ce0dd4a" (UID: "e818d0dc-14fc-4e82-a375-45871ce0dd4a"). InnerVolumeSpecName "var-run". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.700570 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e818d0dc-14fc-4e82-a375-45871ce0dd4a-additional-scripts\") pod \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.700696 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvzbq\" (UniqueName: \"kubernetes.io/projected/e818d0dc-14fc-4e82-a375-45871ce0dd4a-kube-api-access-zvzbq\") pod \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\" (UID: \"e818d0dc-14fc-4e82-a375-45871ce0dd4a\") " Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.701893 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e818d0dc-14fc-4e82-a375-45871ce0dd4a-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "e818d0dc-14fc-4e82-a375-45871ce0dd4a" (UID: "e818d0dc-14fc-4e82-a375-45871ce0dd4a"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.702190 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e818d0dc-14fc-4e82-a375-45871ce0dd4a-scripts" (OuterVolumeSpecName: "scripts") pod "e818d0dc-14fc-4e82-a375-45871ce0dd4a" (UID: "e818d0dc-14fc-4e82-a375-45871ce0dd4a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.702320 4767 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.702341 4767 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-run\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.702355 4767 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/e818d0dc-14fc-4e82-a375-45871ce0dd4a-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.711191 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e818d0dc-14fc-4e82-a375-45871ce0dd4a-kube-api-access-zvzbq" (OuterVolumeSpecName: "kube-api-access-zvzbq") pod "e818d0dc-14fc-4e82-a375-45871ce0dd4a" (UID: "e818d0dc-14fc-4e82-a375-45871ce0dd4a"). InnerVolumeSpecName "kube-api-access-zvzbq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.803652 4767 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/e818d0dc-14fc-4e82-a375-45871ce0dd4a-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.803706 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvzbq\" (UniqueName: \"kubernetes.io/projected/e818d0dc-14fc-4e82-a375-45871ce0dd4a-kube-api-access-zvzbq\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.803720 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e818d0dc-14fc-4e82-a375-45871ce0dd4a-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:27 crc kubenswrapper[4767]: I0128 18:49:27.997796 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.080768 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-dq7h8-config-82fgt" event={"ID":"e818d0dc-14fc-4e82-a375-45871ce0dd4a","Type":"ContainerDied","Data":"f2ffbc4462224dbe34b7990fe249fc67e969e319b573f94663cbc6fa33428211"} Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.080828 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2ffbc4462224dbe34b7990fe249fc67e969e319b573f94663cbc6fa33428211" Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.080848 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-dq7h8-config-82fgt" Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.082343 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"26ad4a4696714cc4ece69f821be95f34b736a790c45a54b79c099ef422dc528a"} Jan 28 18:49:28 crc kubenswrapper[4767]: E0128 18:49:28.084272 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-q46gj" podUID="b867495c-e01f-46a2-aa93-e42cd53d1b64" Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.321495 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.463507 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.679002 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-dq7h8-config-82fgt"] Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.687986 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-dq7h8-config-82fgt"] Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.803930 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e818d0dc-14fc-4e82-a375-45871ce0dd4a" path="/var/lib/kubelet/pods/e818d0dc-14fc-4e82-a375-45871ce0dd4a/volumes" Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.904233 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-6dl26"] Jan 28 18:49:28 crc 
kubenswrapper[4767]: E0128 18:49:28.904762 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0bc86a10-2d77-4909-aea5-23bb07841492" containerName="swift-ring-rebalance" Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.904786 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0bc86a10-2d77-4909-aea5-23bb07841492" containerName="swift-ring-rebalance" Jan 28 18:49:28 crc kubenswrapper[4767]: E0128 18:49:28.904806 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e818d0dc-14fc-4e82-a375-45871ce0dd4a" containerName="ovn-config" Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.904814 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e818d0dc-14fc-4e82-a375-45871ce0dd4a" containerName="ovn-config" Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.905025 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0bc86a10-2d77-4909-aea5-23bb07841492" containerName="swift-ring-rebalance" Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.905059 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e818d0dc-14fc-4e82-a375-45871ce0dd4a" containerName="ovn-config" Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.905789 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6dl26" Jan 28 18:49:28 crc kubenswrapper[4767]: I0128 18:49:28.942175 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6dl26"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.028193 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ebb33a9-654c-4edc-b487-7b2a08c424c6-operator-scripts\") pod \"cinder-db-create-6dl26\" (UID: \"8ebb33a9-654c-4edc-b487-7b2a08c424c6\") " pod="openstack/cinder-db-create-6dl26" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.028303 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4sg4\" (UniqueName: \"kubernetes.io/projected/8ebb33a9-654c-4edc-b487-7b2a08c424c6-kube-api-access-b4sg4\") pod \"cinder-db-create-6dl26\" (UID: \"8ebb33a9-654c-4edc-b487-7b2a08c424c6\") " pod="openstack/cinder-db-create-6dl26" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.080550 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-9qfw8"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.081684 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-9qfw8" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.119443 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-39de-account-create-update-cc7p9"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.120816 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-39de-account-create-update-cc7p9" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.123099 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.129522 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ebb33a9-654c-4edc-b487-7b2a08c424c6-operator-scripts\") pod \"cinder-db-create-6dl26\" (UID: \"8ebb33a9-654c-4edc-b487-7b2a08c424c6\") " pod="openstack/cinder-db-create-6dl26" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.129596 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4sg4\" (UniqueName: \"kubernetes.io/projected/8ebb33a9-654c-4edc-b487-7b2a08c424c6-kube-api-access-b4sg4\") pod \"cinder-db-create-6dl26\" (UID: \"8ebb33a9-654c-4edc-b487-7b2a08c424c6\") " pod="openstack/cinder-db-create-6dl26" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.130636 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ebb33a9-654c-4edc-b487-7b2a08c424c6-operator-scripts\") pod \"cinder-db-create-6dl26\" (UID: \"8ebb33a9-654c-4edc-b487-7b2a08c424c6\") " pod="openstack/cinder-db-create-6dl26" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.150000 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-39de-account-create-update-cc7p9"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.164387 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4sg4\" (UniqueName: \"kubernetes.io/projected/8ebb33a9-654c-4edc-b487-7b2a08c424c6-kube-api-access-b4sg4\") pod \"cinder-db-create-6dl26\" (UID: \"8ebb33a9-654c-4edc-b487-7b2a08c424c6\") " pod="openstack/cinder-db-create-6dl26" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.172027 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-9qfw8"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.225303 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-b47a-account-create-update-ngbck"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.225729 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6dl26" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.227272 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-b47a-account-create-update-ngbck" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.231703 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l4tcj\" (UniqueName: \"kubernetes.io/projected/a4fd6701-6878-4f65-b3aa-fd437ec22ba2-kube-api-access-l4tcj\") pod \"barbican-db-create-9qfw8\" (UID: \"a4fd6701-6878-4f65-b3aa-fd437ec22ba2\") " pod="openstack/barbican-db-create-9qfw8" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.231738 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-75l2f\" (UniqueName: \"kubernetes.io/projected/5173a106-3a63-4c67-8b28-d30ccf1fea1c-kube-api-access-75l2f\") pod \"cinder-39de-account-create-update-cc7p9\" (UID: \"5173a106-3a63-4c67-8b28-d30ccf1fea1c\") " pod="openstack/cinder-39de-account-create-update-cc7p9" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.231766 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5173a106-3a63-4c67-8b28-d30ccf1fea1c-operator-scripts\") pod \"cinder-39de-account-create-update-cc7p9\" (UID: \"5173a106-3a63-4c67-8b28-d30ccf1fea1c\") " pod="openstack/cinder-39de-account-create-update-cc7p9" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.231791 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4fd6701-6878-4f65-b3aa-fd437ec22ba2-operator-scripts\") pod \"barbican-db-create-9qfw8\" (UID: \"a4fd6701-6878-4f65-b3aa-fd437ec22ba2\") " pod="openstack/barbican-db-create-9qfw8" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.236584 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-b47a-account-create-update-ngbck"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.237543 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.334256 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nzzbz\" (UniqueName: \"kubernetes.io/projected/fced9a49-f6fd-4f6f-a71d-b7951315008f-kube-api-access-nzzbz\") pod \"barbican-b47a-account-create-update-ngbck\" (UID: \"fced9a49-f6fd-4f6f-a71d-b7951315008f\") " pod="openstack/barbican-b47a-account-create-update-ngbck" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.334765 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fced9a49-f6fd-4f6f-a71d-b7951315008f-operator-scripts\") pod \"barbican-b47a-account-create-update-ngbck\" (UID: \"fced9a49-f6fd-4f6f-a71d-b7951315008f\") " pod="openstack/barbican-b47a-account-create-update-ngbck" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.334809 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-75l2f\" (UniqueName: \"kubernetes.io/projected/5173a106-3a63-4c67-8b28-d30ccf1fea1c-kube-api-access-75l2f\") pod \"cinder-39de-account-create-update-cc7p9\" (UID: \"5173a106-3a63-4c67-8b28-d30ccf1fea1c\") " pod="openstack/cinder-39de-account-create-update-cc7p9" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.334828 4767 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-l4tcj\" (UniqueName: \"kubernetes.io/projected/a4fd6701-6878-4f65-b3aa-fd437ec22ba2-kube-api-access-l4tcj\") pod \"barbican-db-create-9qfw8\" (UID: \"a4fd6701-6878-4f65-b3aa-fd437ec22ba2\") " pod="openstack/barbican-db-create-9qfw8" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.334847 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5173a106-3a63-4c67-8b28-d30ccf1fea1c-operator-scripts\") pod \"cinder-39de-account-create-update-cc7p9\" (UID: \"5173a106-3a63-4c67-8b28-d30ccf1fea1c\") " pod="openstack/cinder-39de-account-create-update-cc7p9" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.334866 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4fd6701-6878-4f65-b3aa-fd437ec22ba2-operator-scripts\") pod \"barbican-db-create-9qfw8\" (UID: \"a4fd6701-6878-4f65-b3aa-fd437ec22ba2\") " pod="openstack/barbican-db-create-9qfw8" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.335701 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4fd6701-6878-4f65-b3aa-fd437ec22ba2-operator-scripts\") pod \"barbican-db-create-9qfw8\" (UID: \"a4fd6701-6878-4f65-b3aa-fd437ec22ba2\") " pod="openstack/barbican-db-create-9qfw8" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.336917 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5173a106-3a63-4c67-8b28-d30ccf1fea1c-operator-scripts\") pod \"cinder-39de-account-create-update-cc7p9\" (UID: \"5173a106-3a63-4c67-8b28-d30ccf1fea1c\") " pod="openstack/cinder-39de-account-create-update-cc7p9" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.361903 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-create-m5cgt"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.363155 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-m5cgt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.405275 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-q4h5g"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.406696 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.412899 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.413098 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.412907 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.413432 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-lb5dl" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.423943 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-5774-account-create-update-8ptkt"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.425250 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-5774-account-create-update-8ptkt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.428638 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-db-secret" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.437800 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nzzbz\" (UniqueName: \"kubernetes.io/projected/fced9a49-f6fd-4f6f-a71d-b7951315008f-kube-api-access-nzzbz\") pod \"barbican-b47a-account-create-update-ngbck\" (UID: \"fced9a49-f6fd-4f6f-a71d-b7951315008f\") " pod="openstack/barbican-b47a-account-create-update-ngbck" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.437862 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fced9a49-f6fd-4f6f-a71d-b7951315008f-operator-scripts\") pod \"barbican-b47a-account-create-update-ngbck\" (UID: \"fced9a49-f6fd-4f6f-a71d-b7951315008f\") " pod="openstack/barbican-b47a-account-create-update-ngbck" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.439278 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fced9a49-f6fd-4f6f-a71d-b7951315008f-operator-scripts\") pod \"barbican-b47a-account-create-update-ngbck\" (UID: \"fced9a49-f6fd-4f6f-a71d-b7951315008f\") " pod="openstack/barbican-b47a-account-create-update-ngbck" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.444553 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-75l2f\" (UniqueName: \"kubernetes.io/projected/5173a106-3a63-4c67-8b28-d30ccf1fea1c-kube-api-access-75l2f\") pod \"cinder-39de-account-create-update-cc7p9\" (UID: \"5173a106-3a63-4c67-8b28-d30ccf1fea1c\") " pod="openstack/cinder-39de-account-create-update-cc7p9" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.450050 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-m5cgt"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.456290 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l4tcj\" (UniqueName: \"kubernetes.io/projected/a4fd6701-6878-4f65-b3aa-fd437ec22ba2-kube-api-access-l4tcj\") pod \"barbican-db-create-9qfw8\" (UID: \"a4fd6701-6878-4f65-b3aa-fd437ec22ba2\") " pod="openstack/barbican-db-create-9qfw8" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.479869 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-q4h5g"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.494047 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nzzbz\" (UniqueName: \"kubernetes.io/projected/fced9a49-f6fd-4f6f-a71d-b7951315008f-kube-api-access-nzzbz\") pod \"barbican-b47a-account-create-update-ngbck\" (UID: \"fced9a49-f6fd-4f6f-a71d-b7951315008f\") " pod="openstack/barbican-b47a-account-create-update-ngbck" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.507061 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-5774-account-create-update-8ptkt"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.545558 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f574l\" (UniqueName: \"kubernetes.io/projected/2df7ec48-f703-40db-9f31-f4f216de7935-kube-api-access-f574l\") pod \"keystone-db-sync-q4h5g\" (UID: 
\"2df7ec48-f703-40db-9f31-f4f216de7935\") " pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.545646 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a81f2b7-8862-4a59-bf90-23a8310a9b19-operator-scripts\") pod \"heat-db-create-m5cgt\" (UID: \"8a81f2b7-8862-4a59-bf90-23a8310a9b19\") " pod="openstack/heat-db-create-m5cgt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.545672 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df7ec48-f703-40db-9f31-f4f216de7935-config-data\") pod \"keystone-db-sync-q4h5g\" (UID: \"2df7ec48-f703-40db-9f31-f4f216de7935\") " pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.545692 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df7ec48-f703-40db-9f31-f4f216de7935-combined-ca-bundle\") pod \"keystone-db-sync-q4h5g\" (UID: \"2df7ec48-f703-40db-9f31-f4f216de7935\") " pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.545718 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dtzg\" (UniqueName: \"kubernetes.io/projected/eb33b8a5-83e7-4431-b6fa-489b022f600e-kube-api-access-8dtzg\") pod \"heat-5774-account-create-update-8ptkt\" (UID: \"eb33b8a5-83e7-4431-b6fa-489b022f600e\") " pod="openstack/heat-5774-account-create-update-8ptkt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.545756 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ft7d6\" (UniqueName: \"kubernetes.io/projected/8a81f2b7-8862-4a59-bf90-23a8310a9b19-kube-api-access-ft7d6\") pod \"heat-db-create-m5cgt\" (UID: \"8a81f2b7-8862-4a59-bf90-23a8310a9b19\") " pod="openstack/heat-db-create-m5cgt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.545792 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb33b8a5-83e7-4431-b6fa-489b022f600e-operator-scripts\") pod \"heat-5774-account-create-update-8ptkt\" (UID: \"eb33b8a5-83e7-4431-b6fa-489b022f600e\") " pod="openstack/heat-5774-account-create-update-8ptkt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.567483 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-c777-account-create-update-rg925"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.569117 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-c777-account-create-update-rg925" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.576245 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.598972 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c777-account-create-update-rg925"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.652012 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ft7d6\" (UniqueName: \"kubernetes.io/projected/8a81f2b7-8862-4a59-bf90-23a8310a9b19-kube-api-access-ft7d6\") pod \"heat-db-create-m5cgt\" (UID: \"8a81f2b7-8862-4a59-bf90-23a8310a9b19\") " pod="openstack/heat-db-create-m5cgt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.652114 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb33b8a5-83e7-4431-b6fa-489b022f600e-operator-scripts\") pod \"heat-5774-account-create-update-8ptkt\" (UID: \"eb33b8a5-83e7-4431-b6fa-489b022f600e\") " pod="openstack/heat-5774-account-create-update-8ptkt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.652249 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f574l\" (UniqueName: \"kubernetes.io/projected/2df7ec48-f703-40db-9f31-f4f216de7935-kube-api-access-f574l\") pod \"keystone-db-sync-q4h5g\" (UID: \"2df7ec48-f703-40db-9f31-f4f216de7935\") " pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.652315 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a81f2b7-8862-4a59-bf90-23a8310a9b19-operator-scripts\") pod \"heat-db-create-m5cgt\" (UID: \"8a81f2b7-8862-4a59-bf90-23a8310a9b19\") " pod="openstack/heat-db-create-m5cgt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.652342 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df7ec48-f703-40db-9f31-f4f216de7935-config-data\") pod \"keystone-db-sync-q4h5g\" (UID: \"2df7ec48-f703-40db-9f31-f4f216de7935\") " pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.652377 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df7ec48-f703-40db-9f31-f4f216de7935-combined-ca-bundle\") pod \"keystone-db-sync-q4h5g\" (UID: \"2df7ec48-f703-40db-9f31-f4f216de7935\") " pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.652408 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dtzg\" (UniqueName: \"kubernetes.io/projected/eb33b8a5-83e7-4431-b6fa-489b022f600e-kube-api-access-8dtzg\") pod \"heat-5774-account-create-update-8ptkt\" (UID: \"eb33b8a5-83e7-4431-b6fa-489b022f600e\") " pod="openstack/heat-5774-account-create-update-8ptkt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.653914 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb33b8a5-83e7-4431-b6fa-489b022f600e-operator-scripts\") pod \"heat-5774-account-create-update-8ptkt\" (UID: \"eb33b8a5-83e7-4431-b6fa-489b022f600e\") " 
pod="openstack/heat-5774-account-create-update-8ptkt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.654109 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a81f2b7-8862-4a59-bf90-23a8310a9b19-operator-scripts\") pod \"heat-db-create-m5cgt\" (UID: \"8a81f2b7-8862-4a59-bf90-23a8310a9b19\") " pod="openstack/heat-db-create-m5cgt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.654525 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-b47a-account-create-update-ngbck" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.658626 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-885qk"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.692027 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df7ec48-f703-40db-9f31-f4f216de7935-config-data\") pod \"keystone-db-sync-q4h5g\" (UID: \"2df7ec48-f703-40db-9f31-f4f216de7935\") " pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.704235 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df7ec48-f703-40db-9f31-f4f216de7935-combined-ca-bundle\") pod \"keystone-db-sync-q4h5g\" (UID: \"2df7ec48-f703-40db-9f31-f4f216de7935\") " pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.704921 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-885qk" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.723433 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f574l\" (UniqueName: \"kubernetes.io/projected/2df7ec48-f703-40db-9f31-f4f216de7935-kube-api-access-f574l\") pod \"keystone-db-sync-q4h5g\" (UID: \"2df7ec48-f703-40db-9f31-f4f216de7935\") " pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.728872 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dtzg\" (UniqueName: \"kubernetes.io/projected/eb33b8a5-83e7-4431-b6fa-489b022f600e-kube-api-access-8dtzg\") pod \"heat-5774-account-create-update-8ptkt\" (UID: \"eb33b8a5-83e7-4431-b6fa-489b022f600e\") " pod="openstack/heat-5774-account-create-update-8ptkt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.732138 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ft7d6\" (UniqueName: \"kubernetes.io/projected/8a81f2b7-8862-4a59-bf90-23a8310a9b19-kube-api-access-ft7d6\") pod \"heat-db-create-m5cgt\" (UID: \"8a81f2b7-8862-4a59-bf90-23a8310a9b19\") " pod="openstack/heat-db-create-m5cgt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.743283 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-9qfw8" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.743838 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-39de-account-create-update-cc7p9" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.761758 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/927eabd5-6408-478b-8740-3897f0efcb05-operator-scripts\") pod \"neutron-db-create-885qk\" (UID: \"927eabd5-6408-478b-8740-3897f0efcb05\") " pod="openstack/neutron-db-create-885qk" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.762388 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pw6bq\" (UniqueName: \"kubernetes.io/projected/3c38d151-db9b-43d4-ab60-44b7f3169fd2-kube-api-access-pw6bq\") pod \"neutron-c777-account-create-update-rg925\" (UID: \"3c38d151-db9b-43d4-ab60-44b7f3169fd2\") " pod="openstack/neutron-c777-account-create-update-rg925" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.762468 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c38d151-db9b-43d4-ab60-44b7f3169fd2-operator-scripts\") pod \"neutron-c777-account-create-update-rg925\" (UID: \"3c38d151-db9b-43d4-ab60-44b7f3169fd2\") " pod="openstack/neutron-c777-account-create-update-rg925" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.762522 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-798ll\" (UniqueName: \"kubernetes.io/projected/927eabd5-6408-478b-8740-3897f0efcb05-kube-api-access-798ll\") pod \"neutron-db-create-885qk\" (UID: \"927eabd5-6408-478b-8740-3897f0efcb05\") " pod="openstack/neutron-db-create-885qk" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.818258 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-885qk"] Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.827714 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.863276 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-5774-account-create-update-8ptkt" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.865078 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/927eabd5-6408-478b-8740-3897f0efcb05-operator-scripts\") pod \"neutron-db-create-885qk\" (UID: \"927eabd5-6408-478b-8740-3897f0efcb05\") " pod="openstack/neutron-db-create-885qk" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.865332 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pw6bq\" (UniqueName: \"kubernetes.io/projected/3c38d151-db9b-43d4-ab60-44b7f3169fd2-kube-api-access-pw6bq\") pod \"neutron-c777-account-create-update-rg925\" (UID: \"3c38d151-db9b-43d4-ab60-44b7f3169fd2\") " pod="openstack/neutron-c777-account-create-update-rg925" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.865462 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c38d151-db9b-43d4-ab60-44b7f3169fd2-operator-scripts\") pod \"neutron-c777-account-create-update-rg925\" (UID: \"3c38d151-db9b-43d4-ab60-44b7f3169fd2\") " pod="openstack/neutron-c777-account-create-update-rg925" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.865488 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-798ll\" (UniqueName: \"kubernetes.io/projected/927eabd5-6408-478b-8740-3897f0efcb05-kube-api-access-798ll\") pod \"neutron-db-create-885qk\" (UID: \"927eabd5-6408-478b-8740-3897f0efcb05\") " pod="openstack/neutron-db-create-885qk" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.866594 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/927eabd5-6408-478b-8740-3897f0efcb05-operator-scripts\") pod \"neutron-db-create-885qk\" (UID: \"927eabd5-6408-478b-8740-3897f0efcb05\") " pod="openstack/neutron-db-create-885qk" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.867055 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c38d151-db9b-43d4-ab60-44b7f3169fd2-operator-scripts\") pod \"neutron-c777-account-create-update-rg925\" (UID: \"3c38d151-db9b-43d4-ab60-44b7f3169fd2\") " pod="openstack/neutron-c777-account-create-update-rg925" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.894847 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-798ll\" (UniqueName: \"kubernetes.io/projected/927eabd5-6408-478b-8740-3897f0efcb05-kube-api-access-798ll\") pod \"neutron-db-create-885qk\" (UID: \"927eabd5-6408-478b-8740-3897f0efcb05\") " pod="openstack/neutron-db-create-885qk" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.897941 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pw6bq\" (UniqueName: \"kubernetes.io/projected/3c38d151-db9b-43d4-ab60-44b7f3169fd2-kube-api-access-pw6bq\") pod \"neutron-c777-account-create-update-rg925\" (UID: \"3c38d151-db9b-43d4-ab60-44b7f3169fd2\") " pod="openstack/neutron-c777-account-create-update-rg925" Jan 28 18:49:29 crc kubenswrapper[4767]: I0128 18:49:29.936104 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-c777-account-create-update-rg925" Jan 28 18:49:30 crc kubenswrapper[4767]: I0128 18:49:30.001884 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-m5cgt" Jan 28 18:49:30 crc kubenswrapper[4767]: I0128 18:49:30.059530 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-885qk" Jan 28 18:49:30 crc kubenswrapper[4767]: I0128 18:49:30.181826 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6dl26"] Jan 28 18:49:30 crc kubenswrapper[4767]: W0128 18:49:30.215366 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8ebb33a9_654c_4edc_b487_7b2a08c424c6.slice/crio-2359cba7024026118dafaeb2299f2c06f0d9a8c7274c07180dc3db4bb924c528 WatchSource:0}: Error finding container 2359cba7024026118dafaeb2299f2c06f0d9a8c7274c07180dc3db4bb924c528: Status 404 returned error can't find the container with id 2359cba7024026118dafaeb2299f2c06f0d9a8c7274c07180dc3db4bb924c528 Jan 28 18:49:30 crc kubenswrapper[4767]: I0128 18:49:30.574391 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-39de-account-create-update-cc7p9"] Jan 28 18:49:30 crc kubenswrapper[4767]: I0128 18:49:30.596647 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-q4h5g"] Jan 28 18:49:30 crc kubenswrapper[4767]: I0128 18:49:30.780846 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-9qfw8"] Jan 28 18:49:30 crc kubenswrapper[4767]: I0128 18:49:30.817970 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-b47a-account-create-update-ngbck"] Jan 28 18:49:30 crc kubenswrapper[4767]: I0128 18:49:30.874006 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-5774-account-create-update-8ptkt"] Jan 28 18:49:31 crc kubenswrapper[4767]: I0128 18:49:31.054648 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c777-account-create-update-rg925"] Jan 28 18:49:31 crc kubenswrapper[4767]: I0128 18:49:31.067887 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-create-m5cgt"] Jan 28 18:49:31 crc kubenswrapper[4767]: I0128 18:49:31.090572 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-885qk"] Jan 28 18:49:31 crc kubenswrapper[4767]: I0128 18:49:31.122816 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-39de-account-create-update-cc7p9" event={"ID":"5173a106-3a63-4c67-8b28-d30ccf1fea1c","Type":"ContainerStarted","Data":"b4312608902cc7bd75149b1d13bf33c9be26997dee57600c0c72d32b74231512"} Jan 28 18:49:31 crc kubenswrapper[4767]: I0128 18:49:31.135830 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6dl26" event={"ID":"8ebb33a9-654c-4edc-b487-7b2a08c424c6","Type":"ContainerStarted","Data":"0db59ac617bfc346f657797ad47c6cde7a7226af5e20893b38842784fa29c9da"} Jan 28 18:49:31 crc kubenswrapper[4767]: I0128 18:49:31.135887 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6dl26" event={"ID":"8ebb33a9-654c-4edc-b487-7b2a08c424c6","Type":"ContainerStarted","Data":"2359cba7024026118dafaeb2299f2c06f0d9a8c7274c07180dc3db4bb924c528"} Jan 28 18:49:31 crc kubenswrapper[4767]: I0128 18:49:31.162169 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/cinder-db-create-6dl26" podStartSLOduration=3.162147035 podStartE2EDuration="3.162147035s" podCreationTimestamp="2026-01-28 18:49:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:49:31.161360411 +0000 UTC m=+1177.125543295" watchObservedRunningTime="2026-01-28 18:49:31.162147035 +0000 UTC m=+1177.126329899" Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.153270 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b47a-account-create-update-ngbck" event={"ID":"fced9a49-f6fd-4f6f-a71d-b7951315008f","Type":"ContainerStarted","Data":"c37aa327196f88cd03f74e74cdd6f61689bdc1117e5c25713a8d4aa8f91457ec"} Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.159020 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-m5cgt" event={"ID":"8a81f2b7-8862-4a59-bf90-23a8310a9b19","Type":"ContainerStarted","Data":"a31d4a620ce542db08c9c2cc12cbb516fca55a8d14ca6c321bd59d8c7df81441"} Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.164618 4767 generic.go:334] "Generic (PLEG): container finished" podID="8ebb33a9-654c-4edc-b487-7b2a08c424c6" containerID="0db59ac617bfc346f657797ad47c6cde7a7226af5e20893b38842784fa29c9da" exitCode=0 Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.164746 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6dl26" event={"ID":"8ebb33a9-654c-4edc-b487-7b2a08c424c6","Type":"ContainerDied","Data":"0db59ac617bfc346f657797ad47c6cde7a7226af5e20893b38842784fa29c9da"} Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.188870 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-q4h5g" event={"ID":"2df7ec48-f703-40db-9f31-f4f216de7935","Type":"ContainerStarted","Data":"177eb935a27fa30020e476b3663f27205fe3a4601113ed07839b357a534b6e0f"} Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.195565 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-885qk" event={"ID":"927eabd5-6408-478b-8740-3897f0efcb05","Type":"ContainerStarted","Data":"a9c801cc80623245505149773d45b9bfb92761cc181ed3f996695139e05c322e"} Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.197121 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-9qfw8" event={"ID":"a4fd6701-6878-4f65-b3aa-fd437ec22ba2","Type":"ContainerStarted","Data":"3ab3cf9da08492037582791258c6b8d30b88f05e10071c4bb1600f7529768287"} Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.206752 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c777-account-create-update-rg925" event={"ID":"3c38d151-db9b-43d4-ab60-44b7f3169fd2","Type":"ContainerStarted","Data":"0d7801abcf6daf55d5dcc9b31ec96d66f2ce629914e4496a60128881ab536979"} Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.211544 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-39de-account-create-update-cc7p9" event={"ID":"5173a106-3a63-4c67-8b28-d30ccf1fea1c","Type":"ContainerStarted","Data":"ad3c363464601bbaf2037b7cd24bb10690988168b7388dc4d7d9d0b205f29eea"} Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.219928 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-5774-account-create-update-8ptkt" event={"ID":"eb33b8a5-83e7-4431-b6fa-489b022f600e","Type":"ContainerStarted","Data":"3411901c2af49c12222933bdd5d4f82c1039997c6aecd44390366c79c983da89"} Jan 28 18:49:32 crc 
kubenswrapper[4767]: I0128 18:49:32.219993 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-5774-account-create-update-8ptkt" event={"ID":"eb33b8a5-83e7-4431-b6fa-489b022f600e","Type":"ContainerStarted","Data":"b665ea0fb4c79e28277e38e4a919b34f113eed0ca1fbef8428c6be425b81b3cc"} Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.241026 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-c777-account-create-update-rg925" podStartSLOduration=3.241003667 podStartE2EDuration="3.241003667s" podCreationTimestamp="2026-01-28 18:49:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:49:32.236431135 +0000 UTC m=+1178.200614009" watchObservedRunningTime="2026-01-28 18:49:32.241003667 +0000 UTC m=+1178.205186541" Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.272652 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-5774-account-create-update-8ptkt" podStartSLOduration=3.272614845 podStartE2EDuration="3.272614845s" podCreationTimestamp="2026-01-28 18:49:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:49:32.269484037 +0000 UTC m=+1178.233666911" watchObservedRunningTime="2026-01-28 18:49:32.272614845 +0000 UTC m=+1178.236797719" Jan 28 18:49:32 crc kubenswrapper[4767]: I0128 18:49:32.301132 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-39de-account-create-update-cc7p9" podStartSLOduration=3.301103804 podStartE2EDuration="3.301103804s" podCreationTimestamp="2026-01-28 18:49:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:49:32.294168517 +0000 UTC m=+1178.258351391" watchObservedRunningTime="2026-01-28 18:49:32.301103804 +0000 UTC m=+1178.265286678" Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.251939 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-885qk" event={"ID":"927eabd5-6408-478b-8740-3897f0efcb05","Type":"ContainerStarted","Data":"b68b5de623d0808960949f320fa16bab631ec0ed912b4cb25786d725656546b4"} Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.260313 4767 generic.go:334] "Generic (PLEG): container finished" podID="a4fd6701-6878-4f65-b3aa-fd437ec22ba2" containerID="5c147555279081912bdef04baf605e41f62d029008869b2b65d655b149dff921" exitCode=0 Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.260394 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-9qfw8" event={"ID":"a4fd6701-6878-4f65-b3aa-fd437ec22ba2","Type":"ContainerDied","Data":"5c147555279081912bdef04baf605e41f62d029008869b2b65d655b149dff921"} Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.272251 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-m5cgt" event={"ID":"8a81f2b7-8862-4a59-bf90-23a8310a9b19","Type":"ContainerStarted","Data":"20c8be4ec78c94967c72b6c310a4ee508ea503939cc24f9830fba93fc0d942ab"} Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.276730 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c777-account-create-update-rg925" event={"ID":"3c38d151-db9b-43d4-ab60-44b7f3169fd2","Type":"ContainerStarted","Data":"583d0abace9a26ea2c78066a5abf0e36aff5c7002d177025bba69504223e9ba7"} 
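The "SyncLoop (PLEG): event for pod" entries above (kubelet.go:2453) are the Pod Lifecycle Event Generator relaying container state changes back into the kubelet sync loop; each carries the pod, its UID, an event Type such as ContainerStarted or ContainerDied, and the container or sandbox ID in Data. Below is a minimal sketch of pulling those fields back out of a log like this one, assuming only the klog text format visible above; plegRe and the two counters are illustrative names for this sketch, not kubelet code.

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// plegRe matches the structured PLEG lines emitted at kubelet.go:2453, e.g.
//   "SyncLoop (PLEG): event for pod" pod="ns/name" event={"ID":"<uid>","Type":"ContainerStarted","Data":"<container-id>"}
var plegRe = regexp.MustCompile(`"SyncLoop \(PLEG\): event for pod" pod="([^"]+)" event=\{"ID":"([^"]+)","Type":"([^"]+)","Data":"([^"]+)"\}`)

func main() {
	started := map[string]int{}
	died := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // kubelet log lines can be long
	for sc.Scan() {
		m := plegRe.FindStringSubmatch(sc.Text())
		if m == nil {
			continue
		}
		switch m[3] { // event Type
		case "ContainerStarted":
			started[m[1]]++
		case "ContainerDied":
			died[m[1]]++
		}
	}
	for pod, n := range started {
		fmt.Printf("%-55s started=%d died=%d\n", pod, n, died[pod])
	}
}

Piping the log through this gives a per-pod started/died tally, which is enough to spot the one-shot db-create and account-create jobs in this section: each starts a sandbox plus one container, and the "Generic (PLEG): container finished" entries show that container exiting with exitCode=0 a few seconds later.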
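The pod_startup_latency_tracker.go:104 entries record the startup SLO data for each pod. For cinder-db-create-6dl26 above, podStartSLOduration equals podStartE2EDuration (3.162147035s) because firstStartedPulling and lastFinishedPulling are the zero time, i.e. no image pull contributed, and the figure is exactly watchObservedRunningTime minus podCreationTimestamp. A quick check of that arithmetic, as a sketch that assumes nothing beyond the timestamps printed in the entry above:

package main

import (
	"fmt"
	"time"
)

// Reproduce the podStartSLOduration arithmetic for cinder-db-create-6dl26
// from the timestamps in the log entry above. Parse errors are ignored for
// brevity since the inputs are literals copied from the log.
func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	created, _ := time.Parse(layout, "2026-01-28 18:49:28 +0000 UTC")
	running, _ := time.Parse(layout, "2026-01-28 18:49:31.162147035 +0000 UTC")
	fmt.Println(running.Sub(created).Seconds()) // prints 3.162147035
}

The m=+1177.125543295-style suffixes on these timestamps are Go's monotonic-clock readings since kubelet start, printed by time.Time's String method; they do not enter this calculation.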
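Most of the remaining entries in this section are the volume reconciler walking each pod volume through its lifecycle: VerifyControllerAttachedVolume started (reconciler_common.go:245), then MountVolume started (reconciler_common.go:218), then MountVolume.SetUp succeeded (operation_generator.go:637) on pod creation; and UnmountVolume started (reconciler_common.go:159), UnmountVolume.TearDown succeeded (operation_generator.go:803), Volume detached (reconciler_common.go:293) on teardown. The sketch below reduces the log to the last observed phase per volume; it assumes the two quoting styles visible above (escaped \" inside structured messages, bare quotes in the plain operation_generator ones), and volRe and phase are illustrative names.

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// volRe captures the reconciler / operation-generator phase and the quoted
// volume token following "for volume". The backslashes are optional so both
// \"operator-scripts\" and "kubernetes.io/configmap/<uid>-operator-scripts"
// forms match.
var volRe = regexp.MustCompile(`(VerifyControllerAttachedVolume started|MountVolume started|MountVolume\.SetUp succeeded|UnmountVolume started|UnmountVolume\.TearDown succeeded|Volume detached) for volume \\?"([^"\\]+)\\?"`)

func main() {
	phase := map[string]string{} // volume token -> last phase observed
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
	for sc.Scan() {
		if m := volRe.FindStringSubmatch(sc.Text()); m != nil {
			phase[m[2]] = m[1]
		}
	}
	for vol, p := range phase {
		fmt.Printf("%-40s last phase: %s\n", vol, p)
	}
}

Run over this section, every operator-scripts configmap and kube-api-access-* projected volume belonging to the completed jobs ends in "Volume detached", consistent with the "Cleaned up orphaned pod volumes dir" entry for pod e818d0dc-14fc-4e82-a375-45871ce0dd4a above.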
Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.285395 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-885qk" podStartSLOduration=4.285362732 podStartE2EDuration="4.285362732s" podCreationTimestamp="2026-01-28 18:49:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:49:33.284081171 +0000 UTC m=+1179.248264045" watchObservedRunningTime="2026-01-28 18:49:33.285362732 +0000 UTC m=+1179.249545606" Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.289313 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"4da5788506f8f50b9e5fda53108231e53b1b6fd3da19972ded986f25dcd172b4"} Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.289373 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"6645088b6e08a364b9cb4b454483bc7be6249fe922c24bc7de192c8e2a39fb67"} Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.289387 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"53b45972223dbff2137c097c2b9d2496599886c064aed91238d99b313052859a"} Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.300660 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b47a-account-create-update-ngbck" event={"ID":"fced9a49-f6fd-4f6f-a71d-b7951315008f","Type":"ContainerStarted","Data":"5cc39b115922ca7184e378a6ee63563e811ca896a6cf442e71bc32d5dacf16d7"} Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.334308 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-create-m5cgt" podStartSLOduration=4.334276169 podStartE2EDuration="4.334276169s" podCreationTimestamp="2026-01-28 18:49:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:49:33.315042668 +0000 UTC m=+1179.279225532" watchObservedRunningTime="2026-01-28 18:49:33.334276169 +0000 UTC m=+1179.298459043" Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.367441 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-b47a-account-create-update-ngbck" podStartSLOduration=4.367417364 podStartE2EDuration="4.367417364s" podCreationTimestamp="2026-01-28 18:49:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:49:33.362416808 +0000 UTC m=+1179.326599682" watchObservedRunningTime="2026-01-28 18:49:33.367417364 +0000 UTC m=+1179.331600238" Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.738812 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-create-6dl26" Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.886011 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ebb33a9-654c-4edc-b487-7b2a08c424c6-operator-scripts\") pod \"8ebb33a9-654c-4edc-b487-7b2a08c424c6\" (UID: \"8ebb33a9-654c-4edc-b487-7b2a08c424c6\") " Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.886369 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4sg4\" (UniqueName: \"kubernetes.io/projected/8ebb33a9-654c-4edc-b487-7b2a08c424c6-kube-api-access-b4sg4\") pod \"8ebb33a9-654c-4edc-b487-7b2a08c424c6\" (UID: \"8ebb33a9-654c-4edc-b487-7b2a08c424c6\") " Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.887144 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ebb33a9-654c-4edc-b487-7b2a08c424c6-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8ebb33a9-654c-4edc-b487-7b2a08c424c6" (UID: "8ebb33a9-654c-4edc-b487-7b2a08c424c6"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.896253 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ebb33a9-654c-4edc-b487-7b2a08c424c6-kube-api-access-b4sg4" (OuterVolumeSpecName: "kube-api-access-b4sg4") pod "8ebb33a9-654c-4edc-b487-7b2a08c424c6" (UID: "8ebb33a9-654c-4edc-b487-7b2a08c424c6"). InnerVolumeSpecName "kube-api-access-b4sg4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.988902 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4sg4\" (UniqueName: \"kubernetes.io/projected/8ebb33a9-654c-4edc-b487-7b2a08c424c6-kube-api-access-b4sg4\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:33 crc kubenswrapper[4767]: I0128 18:49:33.988956 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8ebb33a9-654c-4edc-b487-7b2a08c424c6-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.324561 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"9b6d382d268c98df5e0f678bf2b332e67dd1de121989137684fef114753697e1"} Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.341994 4767 generic.go:334] "Generic (PLEG): container finished" podID="fced9a49-f6fd-4f6f-a71d-b7951315008f" containerID="5cc39b115922ca7184e378a6ee63563e811ca896a6cf442e71bc32d5dacf16d7" exitCode=0 Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.342078 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b47a-account-create-update-ngbck" event={"ID":"fced9a49-f6fd-4f6f-a71d-b7951315008f","Type":"ContainerDied","Data":"5cc39b115922ca7184e378a6ee63563e811ca896a6cf442e71bc32d5dacf16d7"} Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.344733 4767 generic.go:334] "Generic (PLEG): container finished" podID="927eabd5-6408-478b-8740-3897f0efcb05" containerID="b68b5de623d0808960949f320fa16bab631ec0ed912b4cb25786d725656546b4" exitCode=0 Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.344799 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-885qk" 
event={"ID":"927eabd5-6408-478b-8740-3897f0efcb05","Type":"ContainerDied","Data":"b68b5de623d0808960949f320fa16bab631ec0ed912b4cb25786d725656546b4"} Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.349710 4767 generic.go:334] "Generic (PLEG): container finished" podID="8a81f2b7-8862-4a59-bf90-23a8310a9b19" containerID="20c8be4ec78c94967c72b6c310a4ee508ea503939cc24f9830fba93fc0d942ab" exitCode=0 Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.349807 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-m5cgt" event={"ID":"8a81f2b7-8862-4a59-bf90-23a8310a9b19","Type":"ContainerDied","Data":"20c8be4ec78c94967c72b6c310a4ee508ea503939cc24f9830fba93fc0d942ab"} Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.353336 4767 generic.go:334] "Generic (PLEG): container finished" podID="3c38d151-db9b-43d4-ab60-44b7f3169fd2" containerID="583d0abace9a26ea2c78066a5abf0e36aff5c7002d177025bba69504223e9ba7" exitCode=0 Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.353435 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c777-account-create-update-rg925" event={"ID":"3c38d151-db9b-43d4-ab60-44b7f3169fd2","Type":"ContainerDied","Data":"583d0abace9a26ea2c78066a5abf0e36aff5c7002d177025bba69504223e9ba7"} Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.356303 4767 generic.go:334] "Generic (PLEG): container finished" podID="5173a106-3a63-4c67-8b28-d30ccf1fea1c" containerID="ad3c363464601bbaf2037b7cd24bb10690988168b7388dc4d7d9d0b205f29eea" exitCode=0 Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.356364 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-39de-account-create-update-cc7p9" event={"ID":"5173a106-3a63-4c67-8b28-d30ccf1fea1c","Type":"ContainerDied","Data":"ad3c363464601bbaf2037b7cd24bb10690988168b7388dc4d7d9d0b205f29eea"} Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.365730 4767 generic.go:334] "Generic (PLEG): container finished" podID="eb33b8a5-83e7-4431-b6fa-489b022f600e" containerID="3411901c2af49c12222933bdd5d4f82c1039997c6aecd44390366c79c983da89" exitCode=0 Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.365886 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-5774-account-create-update-8ptkt" event={"ID":"eb33b8a5-83e7-4431-b6fa-489b022f600e","Type":"ContainerDied","Data":"3411901c2af49c12222933bdd5d4f82c1039997c6aecd44390366c79c983da89"} Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.370289 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6dl26" event={"ID":"8ebb33a9-654c-4edc-b487-7b2a08c424c6","Type":"ContainerDied","Data":"2359cba7024026118dafaeb2299f2c06f0d9a8c7274c07180dc3db4bb924c528"} Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.370318 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2359cba7024026118dafaeb2299f2c06f0d9a8c7274c07180dc3db4bb924c528" Jan 28 18:49:34 crc kubenswrapper[4767]: I0128 18:49:34.370421 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6dl26" Jan 28 18:49:35 crc kubenswrapper[4767]: I0128 18:49:35.272115 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-9qfw8" Jan 28 18:49:35 crc kubenswrapper[4767]: I0128 18:49:35.386493 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-9qfw8" Jan 28 18:49:35 crc kubenswrapper[4767]: I0128 18:49:35.386712 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-9qfw8" event={"ID":"a4fd6701-6878-4f65-b3aa-fd437ec22ba2","Type":"ContainerDied","Data":"3ab3cf9da08492037582791258c6b8d30b88f05e10071c4bb1600f7529768287"} Jan 28 18:49:35 crc kubenswrapper[4767]: I0128 18:49:35.387514 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3ab3cf9da08492037582791258c6b8d30b88f05e10071c4bb1600f7529768287" Jan 28 18:49:35 crc kubenswrapper[4767]: I0128 18:49:35.420084 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l4tcj\" (UniqueName: \"kubernetes.io/projected/a4fd6701-6878-4f65-b3aa-fd437ec22ba2-kube-api-access-l4tcj\") pod \"a4fd6701-6878-4f65-b3aa-fd437ec22ba2\" (UID: \"a4fd6701-6878-4f65-b3aa-fd437ec22ba2\") " Jan 28 18:49:35 crc kubenswrapper[4767]: I0128 18:49:35.420304 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4fd6701-6878-4f65-b3aa-fd437ec22ba2-operator-scripts\") pod \"a4fd6701-6878-4f65-b3aa-fd437ec22ba2\" (UID: \"a4fd6701-6878-4f65-b3aa-fd437ec22ba2\") " Jan 28 18:49:35 crc kubenswrapper[4767]: I0128 18:49:35.422240 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a4fd6701-6878-4f65-b3aa-fd437ec22ba2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a4fd6701-6878-4f65-b3aa-fd437ec22ba2" (UID: "a4fd6701-6878-4f65-b3aa-fd437ec22ba2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:35 crc kubenswrapper[4767]: I0128 18:49:35.429495 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4fd6701-6878-4f65-b3aa-fd437ec22ba2-kube-api-access-l4tcj" (OuterVolumeSpecName: "kube-api-access-l4tcj") pod "a4fd6701-6878-4f65-b3aa-fd437ec22ba2" (UID: "a4fd6701-6878-4f65-b3aa-fd437ec22ba2"). InnerVolumeSpecName "kube-api-access-l4tcj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:35 crc kubenswrapper[4767]: I0128 18:49:35.523568 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a4fd6701-6878-4f65-b3aa-fd437ec22ba2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:35 crc kubenswrapper[4767]: I0128 18:49:35.523610 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l4tcj\" (UniqueName: \"kubernetes.io/projected/a4fd6701-6878-4f65-b3aa-fd437ec22ba2-kube-api-access-l4tcj\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:39 crc kubenswrapper[4767]: I0128 18:49:39.970942 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-885qk" Jan 28 18:49:39 crc kubenswrapper[4767]: I0128 18:49:39.982069 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-5774-account-create-update-8ptkt" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.003752 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-39de-account-create-update-cc7p9" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.018815 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-b47a-account-create-update-ngbck" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.047778 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c777-account-create-update-rg925" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.052592 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5173a106-3a63-4c67-8b28-d30ccf1fea1c-operator-scripts\") pod \"5173a106-3a63-4c67-8b28-d30ccf1fea1c\" (UID: \"5173a106-3a63-4c67-8b28-d30ccf1fea1c\") " Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.052736 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb33b8a5-83e7-4431-b6fa-489b022f600e-operator-scripts\") pod \"eb33b8a5-83e7-4431-b6fa-489b022f600e\" (UID: \"eb33b8a5-83e7-4431-b6fa-489b022f600e\") " Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.052779 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-798ll\" (UniqueName: \"kubernetes.io/projected/927eabd5-6408-478b-8740-3897f0efcb05-kube-api-access-798ll\") pod \"927eabd5-6408-478b-8740-3897f0efcb05\" (UID: \"927eabd5-6408-478b-8740-3897f0efcb05\") " Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.052851 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dtzg\" (UniqueName: \"kubernetes.io/projected/eb33b8a5-83e7-4431-b6fa-489b022f600e-kube-api-access-8dtzg\") pod \"eb33b8a5-83e7-4431-b6fa-489b022f600e\" (UID: \"eb33b8a5-83e7-4431-b6fa-489b022f600e\") " Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.052881 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-75l2f\" (UniqueName: \"kubernetes.io/projected/5173a106-3a63-4c67-8b28-d30ccf1fea1c-kube-api-access-75l2f\") pod \"5173a106-3a63-4c67-8b28-d30ccf1fea1c\" (UID: \"5173a106-3a63-4c67-8b28-d30ccf1fea1c\") " Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.052921 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/927eabd5-6408-478b-8740-3897f0efcb05-operator-scripts\") pod \"927eabd5-6408-478b-8740-3897f0efcb05\" (UID: \"927eabd5-6408-478b-8740-3897f0efcb05\") " Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.053496 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5173a106-3a63-4c67-8b28-d30ccf1fea1c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "5173a106-3a63-4c67-8b28-d30ccf1fea1c" (UID: "5173a106-3a63-4c67-8b28-d30ccf1fea1c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.055465 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/927eabd5-6408-478b-8740-3897f0efcb05-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "927eabd5-6408-478b-8740-3897f0efcb05" (UID: "927eabd5-6408-478b-8740-3897f0efcb05"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.055758 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb33b8a5-83e7-4431-b6fa-489b022f600e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "eb33b8a5-83e7-4431-b6fa-489b022f600e" (UID: "eb33b8a5-83e7-4431-b6fa-489b022f600e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.063396 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5173a106-3a63-4c67-8b28-d30ccf1fea1c-kube-api-access-75l2f" (OuterVolumeSpecName: "kube-api-access-75l2f") pod "5173a106-3a63-4c67-8b28-d30ccf1fea1c" (UID: "5173a106-3a63-4c67-8b28-d30ccf1fea1c"). InnerVolumeSpecName "kube-api-access-75l2f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.063512 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-m5cgt" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.063577 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/927eabd5-6408-478b-8740-3897f0efcb05-kube-api-access-798ll" (OuterVolumeSpecName: "kube-api-access-798ll") pod "927eabd5-6408-478b-8740-3897f0efcb05" (UID: "927eabd5-6408-478b-8740-3897f0efcb05"). InnerVolumeSpecName "kube-api-access-798ll". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.067100 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb33b8a5-83e7-4431-b6fa-489b022f600e-kube-api-access-8dtzg" (OuterVolumeSpecName: "kube-api-access-8dtzg") pod "eb33b8a5-83e7-4431-b6fa-489b022f600e" (UID: "eb33b8a5-83e7-4431-b6fa-489b022f600e"). InnerVolumeSpecName "kube-api-access-8dtzg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.159402 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8a81f2b7-8862-4a59-bf90-23a8310a9b19-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8a81f2b7-8862-4a59-bf90-23a8310a9b19" (UID: "8a81f2b7-8862-4a59-bf90-23a8310a9b19"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.158491 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a81f2b7-8862-4a59-bf90-23a8310a9b19-operator-scripts\") pod \"8a81f2b7-8862-4a59-bf90-23a8310a9b19\" (UID: \"8a81f2b7-8862-4a59-bf90-23a8310a9b19\") " Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.159646 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzzbz\" (UniqueName: \"kubernetes.io/projected/fced9a49-f6fd-4f6f-a71d-b7951315008f-kube-api-access-nzzbz\") pod \"fced9a49-f6fd-4f6f-a71d-b7951315008f\" (UID: \"fced9a49-f6fd-4f6f-a71d-b7951315008f\") " Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.159713 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ft7d6\" (UniqueName: \"kubernetes.io/projected/8a81f2b7-8862-4a59-bf90-23a8310a9b19-kube-api-access-ft7d6\") pod \"8a81f2b7-8862-4a59-bf90-23a8310a9b19\" (UID: \"8a81f2b7-8862-4a59-bf90-23a8310a9b19\") " Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.159871 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fced9a49-f6fd-4f6f-a71d-b7951315008f-operator-scripts\") pod \"fced9a49-f6fd-4f6f-a71d-b7951315008f\" (UID: \"fced9a49-f6fd-4f6f-a71d-b7951315008f\") " Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.159965 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pw6bq\" (UniqueName: \"kubernetes.io/projected/3c38d151-db9b-43d4-ab60-44b7f3169fd2-kube-api-access-pw6bq\") pod \"3c38d151-db9b-43d4-ab60-44b7f3169fd2\" (UID: \"3c38d151-db9b-43d4-ab60-44b7f3169fd2\") " Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.160048 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c38d151-db9b-43d4-ab60-44b7f3169fd2-operator-scripts\") pod \"3c38d151-db9b-43d4-ab60-44b7f3169fd2\" (UID: \"3c38d151-db9b-43d4-ab60-44b7f3169fd2\") " Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.161599 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c38d151-db9b-43d4-ab60-44b7f3169fd2-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3c38d151-db9b-43d4-ab60-44b7f3169fd2" (UID: "3c38d151-db9b-43d4-ab60-44b7f3169fd2"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.161768 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fced9a49-f6fd-4f6f-a71d-b7951315008f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "fced9a49-f6fd-4f6f-a71d-b7951315008f" (UID: "fced9a49-f6fd-4f6f-a71d-b7951315008f"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.161963 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eb33b8a5-83e7-4431-b6fa-489b022f600e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.161985 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-798ll\" (UniqueName: \"kubernetes.io/projected/927eabd5-6408-478b-8740-3897f0efcb05-kube-api-access-798ll\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.161999 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8a81f2b7-8862-4a59-bf90-23a8310a9b19-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.162011 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dtzg\" (UniqueName: \"kubernetes.io/projected/eb33b8a5-83e7-4431-b6fa-489b022f600e-kube-api-access-8dtzg\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.162022 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-75l2f\" (UniqueName: \"kubernetes.io/projected/5173a106-3a63-4c67-8b28-d30ccf1fea1c-kube-api-access-75l2f\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.162032 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/927eabd5-6408-478b-8740-3897f0efcb05-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.162042 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/fced9a49-f6fd-4f6f-a71d-b7951315008f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.162051 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c38d151-db9b-43d4-ab60-44b7f3169fd2-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.162061 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/5173a106-3a63-4c67-8b28-d30ccf1fea1c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.176240 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8a81f2b7-8862-4a59-bf90-23a8310a9b19-kube-api-access-ft7d6" (OuterVolumeSpecName: "kube-api-access-ft7d6") pod "8a81f2b7-8862-4a59-bf90-23a8310a9b19" (UID: "8a81f2b7-8862-4a59-bf90-23a8310a9b19"). InnerVolumeSpecName "kube-api-access-ft7d6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.176871 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3c38d151-db9b-43d4-ab60-44b7f3169fd2-kube-api-access-pw6bq" (OuterVolumeSpecName: "kube-api-access-pw6bq") pod "3c38d151-db9b-43d4-ab60-44b7f3169fd2" (UID: "3c38d151-db9b-43d4-ab60-44b7f3169fd2"). InnerVolumeSpecName "kube-api-access-pw6bq". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.176951 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fced9a49-f6fd-4f6f-a71d-b7951315008f-kube-api-access-nzzbz" (OuterVolumeSpecName: "kube-api-access-nzzbz") pod "fced9a49-f6fd-4f6f-a71d-b7951315008f" (UID: "fced9a49-f6fd-4f6f-a71d-b7951315008f"). InnerVolumeSpecName "kube-api-access-nzzbz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.263897 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzzbz\" (UniqueName: \"kubernetes.io/projected/fced9a49-f6fd-4f6f-a71d-b7951315008f-kube-api-access-nzzbz\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.264694 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ft7d6\" (UniqueName: \"kubernetes.io/projected/8a81f2b7-8862-4a59-bf90-23a8310a9b19-kube-api-access-ft7d6\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.264709 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pw6bq\" (UniqueName: \"kubernetes.io/projected/3c38d151-db9b-43d4-ab60-44b7f3169fd2-kube-api-access-pw6bq\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.448187 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-5774-account-create-update-8ptkt" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.448184 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-5774-account-create-update-8ptkt" event={"ID":"eb33b8a5-83e7-4431-b6fa-489b022f600e","Type":"ContainerDied","Data":"b665ea0fb4c79e28277e38e4a919b34f113eed0ca1fbef8428c6be425b81b3cc"} Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.448348 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b665ea0fb4c79e28277e38e4a919b34f113eed0ca1fbef8428c6be425b81b3cc" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.452058 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"ad3430c0141ba563241b3aa1749ecff2871fd4e77bdbf99d824e9b130362595d"} Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.452104 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"6e7ab1a2ef04925683f72cbe3efddf41d2ec2a80093c87f59fa6ac7063d17b14"} Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.455094 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-q4h5g" event={"ID":"2df7ec48-f703-40db-9f31-f4f216de7935","Type":"ContainerStarted","Data":"a7f007aaccbf51cdfdf73809d3699724db50b3a4abd709dfa57e68b47fb7fbdb"} Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.465633 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-b47a-account-create-update-ngbck" event={"ID":"fced9a49-f6fd-4f6f-a71d-b7951315008f","Type":"ContainerDied","Data":"c37aa327196f88cd03f74e74cdd6f61689bdc1117e5c25713a8d4aa8f91457ec"} Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.465675 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-b47a-account-create-update-ngbck" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.465682 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c37aa327196f88cd03f74e74cdd6f61689bdc1117e5c25713a8d4aa8f91457ec" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.473755 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-885qk" event={"ID":"927eabd5-6408-478b-8740-3897f0efcb05","Type":"ContainerDied","Data":"a9c801cc80623245505149773d45b9bfb92761cc181ed3f996695139e05c322e"} Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.473811 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a9c801cc80623245505149773d45b9bfb92761cc181ed3f996695139e05c322e" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.473923 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-885qk" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.478478 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-q4h5g" podStartSLOduration=3.157303133 podStartE2EDuration="11.478460857s" podCreationTimestamp="2026-01-28 18:49:29 +0000 UTC" firstStartedPulling="2026-01-28 18:49:31.48125986 +0000 UTC m=+1177.445442734" lastFinishedPulling="2026-01-28 18:49:39.802417584 +0000 UTC m=+1185.766600458" observedRunningTime="2026-01-28 18:49:40.476143064 +0000 UTC m=+1186.440325958" watchObservedRunningTime="2026-01-28 18:49:40.478460857 +0000 UTC m=+1186.442643731" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.482801 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-create-m5cgt" event={"ID":"8a81f2b7-8862-4a59-bf90-23a8310a9b19","Type":"ContainerDied","Data":"a31d4a620ce542db08c9c2cc12cbb516fca55a8d14ca6c321bd59d8c7df81441"} Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.482911 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a31d4a620ce542db08c9c2cc12cbb516fca55a8d14ca6c321bd59d8c7df81441" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.482846 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-create-m5cgt" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.486403 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c777-account-create-update-rg925" event={"ID":"3c38d151-db9b-43d4-ab60-44b7f3169fd2","Type":"ContainerDied","Data":"0d7801abcf6daf55d5dcc9b31ec96d66f2ce629914e4496a60128881ab536979"} Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.486472 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0d7801abcf6daf55d5dcc9b31ec96d66f2ce629914e4496a60128881ab536979" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.486567 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-c777-account-create-update-rg925" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.495309 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-39de-account-create-update-cc7p9" event={"ID":"5173a106-3a63-4c67-8b28-d30ccf1fea1c","Type":"ContainerDied","Data":"b4312608902cc7bd75149b1d13bf33c9be26997dee57600c0c72d32b74231512"} Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.495364 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b4312608902cc7bd75149b1d13bf33c9be26997dee57600c0c72d32b74231512" Jan 28 18:49:40 crc kubenswrapper[4767]: I0128 18:49:40.495452 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-39de-account-create-update-cc7p9" Jan 28 18:49:41 crc kubenswrapper[4767]: I0128 18:49:41.515497 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"81f3d6175f64629752976dfa6c56fcfac2105803185150e999ea2ddc56759983"} Jan 28 18:49:41 crc kubenswrapper[4767]: I0128 18:49:41.516027 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"c3d5dad4a77e9c5593b90894227849585af24c8e2e72101d2529c5a2ea9e0e15"} Jan 28 18:49:41 crc kubenswrapper[4767]: I0128 18:49:41.519579 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-q46gj" event={"ID":"b867495c-e01f-46a2-aa93-e42cd53d1b64","Type":"ContainerStarted","Data":"e7aab6561aff3b78b34ec76883e3527deafa9656c91e2a33fc479da3798f2fd0"} Jan 28 18:49:41 crc kubenswrapper[4767]: I0128 18:49:41.554139 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-q46gj" podStartSLOduration=2.748035996 podStartE2EDuration="30.554099818s" podCreationTimestamp="2026-01-28 18:49:11 +0000 UTC" firstStartedPulling="2026-01-28 18:49:12.772233502 +0000 UTC m=+1158.736416366" lastFinishedPulling="2026-01-28 18:49:40.578297304 +0000 UTC m=+1186.542480188" observedRunningTime="2026-01-28 18:49:41.541804944 +0000 UTC m=+1187.505987818" watchObservedRunningTime="2026-01-28 18:49:41.554099818 +0000 UTC m=+1187.518282692" Jan 28 18:49:43 crc kubenswrapper[4767]: I0128 18:49:43.544970 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"cfdf4aed67deaab5720369abf90b1449017c2b647d9ca565a1abc064ac44a09a"} Jan 28 18:49:43 crc kubenswrapper[4767]: I0128 18:49:43.545632 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"de513e2dad9b5dbb85a8637b43043b0d4e440b298a24f14c782ce60856a9578c"} Jan 28 18:49:43 crc kubenswrapper[4767]: I0128 18:49:43.545661 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"97594fd24d75a1dd2d3b0dd44852689cbf1f354d3b0385b2ddc43428d85b0ce9"} Jan 28 18:49:43 crc kubenswrapper[4767]: I0128 18:49:43.545672 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"4a3b06559467a7f1de0ac6e1636cf2b28af959cf780d4c2721382dc5b3370bae"} Jan 28 18:49:43 crc kubenswrapper[4767]: I0128 18:49:43.545683 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"17380f1f1e95a5f098c3e5fb3e8b16733f0024ea54f6f3609964e98822ada389"} Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.599906 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"ec1250a3b37c7fbf12b864e5ba8e6feb01d14aea4aaf320b7625e2a2d504d3b9"} Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.599960 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"c482494c-49e9-4314-a836-a7bea8f6f8c4","Type":"ContainerStarted","Data":"de123ac0a344033f778f8ca67adf7399a008eead17e0184992eccf5c283874b9"} Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.663754 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.250191787 podStartE2EDuration="51.663715249s" podCreationTimestamp="2026-01-28 18:48:53 +0000 UTC" firstStartedPulling="2026-01-28 18:49:28.008284743 +0000 UTC m=+1173.972467617" lastFinishedPulling="2026-01-28 18:49:42.421808205 +0000 UTC m=+1188.385991079" observedRunningTime="2026-01-28 18:49:44.658092074 +0000 UTC m=+1190.622274948" watchObservedRunningTime="2026-01-28 18:49:44.663715249 +0000 UTC m=+1190.627898123" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.973629 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-rrxbj"] Jan 28 18:49:44 crc kubenswrapper[4767]: E0128 18:49:44.974064 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4fd6701-6878-4f65-b3aa-fd437ec22ba2" containerName="mariadb-database-create" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974084 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4fd6701-6878-4f65-b3aa-fd437ec22ba2" containerName="mariadb-database-create" Jan 28 18:49:44 crc kubenswrapper[4767]: E0128 18:49:44.974101 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ebb33a9-654c-4edc-b487-7b2a08c424c6" containerName="mariadb-database-create" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974109 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ebb33a9-654c-4edc-b487-7b2a08c424c6" containerName="mariadb-database-create" Jan 28 18:49:44 crc kubenswrapper[4767]: E0128 18:49:44.974118 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fced9a49-f6fd-4f6f-a71d-b7951315008f" containerName="mariadb-account-create-update" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974125 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="fced9a49-f6fd-4f6f-a71d-b7951315008f" containerName="mariadb-account-create-update" Jan 28 18:49:44 crc kubenswrapper[4767]: E0128 18:49:44.974134 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="927eabd5-6408-478b-8740-3897f0efcb05" containerName="mariadb-database-create" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974140 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="927eabd5-6408-478b-8740-3897f0efcb05" containerName="mariadb-database-create" Jan 28 18:49:44 crc kubenswrapper[4767]: E0128 
18:49:44.974147 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5173a106-3a63-4c67-8b28-d30ccf1fea1c" containerName="mariadb-account-create-update" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974153 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="5173a106-3a63-4c67-8b28-d30ccf1fea1c" containerName="mariadb-account-create-update" Jan 28 18:49:44 crc kubenswrapper[4767]: E0128 18:49:44.974164 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3c38d151-db9b-43d4-ab60-44b7f3169fd2" containerName="mariadb-account-create-update" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974170 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="3c38d151-db9b-43d4-ab60-44b7f3169fd2" containerName="mariadb-account-create-update" Jan 28 18:49:44 crc kubenswrapper[4767]: E0128 18:49:44.974189 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8a81f2b7-8862-4a59-bf90-23a8310a9b19" containerName="mariadb-database-create" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974196 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8a81f2b7-8862-4a59-bf90-23a8310a9b19" containerName="mariadb-database-create" Jan 28 18:49:44 crc kubenswrapper[4767]: E0128 18:49:44.974223 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb33b8a5-83e7-4431-b6fa-489b022f600e" containerName="mariadb-account-create-update" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974230 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb33b8a5-83e7-4431-b6fa-489b022f600e" containerName="mariadb-account-create-update" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974415 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8a81f2b7-8862-4a59-bf90-23a8310a9b19" containerName="mariadb-database-create" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974427 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="fced9a49-f6fd-4f6f-a71d-b7951315008f" containerName="mariadb-account-create-update" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974436 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="3c38d151-db9b-43d4-ab60-44b7f3169fd2" containerName="mariadb-account-create-update" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974446 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4fd6701-6878-4f65-b3aa-fd437ec22ba2" containerName="mariadb-database-create" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974456 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="927eabd5-6408-478b-8740-3897f0efcb05" containerName="mariadb-database-create" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974466 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="5173a106-3a63-4c67-8b28-d30ccf1fea1c" containerName="mariadb-account-create-update" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974479 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ebb33a9-654c-4edc-b487-7b2a08c424c6" containerName="mariadb-database-create" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.974490 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb33b8a5-83e7-4431-b6fa-489b022f600e" containerName="mariadb-account-create-update" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.975464 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.978345 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 28 18:49:44 crc kubenswrapper[4767]: I0128 18:49:44.992973 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-rrxbj"] Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.071361 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.071472 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.071508 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.071528 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-config\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.071573 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.071630 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zglp\" (UniqueName: \"kubernetes.io/projected/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-kube-api-access-6zglp\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.172851 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.172970 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zglp\" (UniqueName: \"kubernetes.io/projected/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-kube-api-access-6zglp\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: 
\"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.173014 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.173093 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.173146 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.173177 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-config\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.174269 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-dns-swift-storage-0\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.174404 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-ovsdbserver-nb\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.174410 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-dns-svc\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.174636 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-config\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.175042 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-ovsdbserver-sb\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: 
I0128 18:49:45.201770 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zglp\" (UniqueName: \"kubernetes.io/projected/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-kube-api-access-6zglp\") pod \"dnsmasq-dns-5c79d794d7-rrxbj\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.296181 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.455247 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.455598 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:49:45 crc kubenswrapper[4767]: I0128 18:49:45.809636 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-rrxbj"] Jan 28 18:49:46 crc kubenswrapper[4767]: I0128 18:49:46.616945 4767 generic.go:334] "Generic (PLEG): container finished" podID="80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" containerID="2da04d2dcae9390a0ba140da6f551e28fb14a6a8b3126c8be0da10bb10d0fa53" exitCode=0 Jan 28 18:49:46 crc kubenswrapper[4767]: I0128 18:49:46.617077 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" event={"ID":"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a","Type":"ContainerDied","Data":"2da04d2dcae9390a0ba140da6f551e28fb14a6a8b3126c8be0da10bb10d0fa53"} Jan 28 18:49:46 crc kubenswrapper[4767]: I0128 18:49:46.617268 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" event={"ID":"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a","Type":"ContainerStarted","Data":"3fd234d1799084fe1ec74ed51f5279c4f330b4dc60cea6b7b2ffbd3a66452cad"} Jan 28 18:49:47 crc kubenswrapper[4767]: I0128 18:49:47.651974 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" event={"ID":"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a","Type":"ContainerStarted","Data":"02de285e9c400284b60bc7418df05d21dc1f0fc9941356d1025d97c308bd66fa"} Jan 28 18:49:47 crc kubenswrapper[4767]: I0128 18:49:47.652859 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:47 crc kubenswrapper[4767]: I0128 18:49:47.682074 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" podStartSLOduration=3.682052139 podStartE2EDuration="3.682052139s" podCreationTimestamp="2026-01-28 18:49:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:49:47.676869407 +0000 UTC m=+1193.641052291" watchObservedRunningTime="2026-01-28 18:49:47.682052139 +0000 UTC m=+1193.646235013" Jan 28 18:49:48 crc kubenswrapper[4767]: I0128 18:49:48.663145 4767 generic.go:334] "Generic (PLEG): container finished" 
podID="2df7ec48-f703-40db-9f31-f4f216de7935" containerID="a7f007aaccbf51cdfdf73809d3699724db50b3a4abd709dfa57e68b47fb7fbdb" exitCode=0 Jan 28 18:49:48 crc kubenswrapper[4767]: I0128 18:49:48.663386 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-q4h5g" event={"ID":"2df7ec48-f703-40db-9f31-f4f216de7935","Type":"ContainerDied","Data":"a7f007aaccbf51cdfdf73809d3699724db50b3a4abd709dfa57e68b47fb7fbdb"} Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.044907 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.169790 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df7ec48-f703-40db-9f31-f4f216de7935-combined-ca-bundle\") pod \"2df7ec48-f703-40db-9f31-f4f216de7935\" (UID: \"2df7ec48-f703-40db-9f31-f4f216de7935\") " Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.169972 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df7ec48-f703-40db-9f31-f4f216de7935-config-data\") pod \"2df7ec48-f703-40db-9f31-f4f216de7935\" (UID: \"2df7ec48-f703-40db-9f31-f4f216de7935\") " Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.170179 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f574l\" (UniqueName: \"kubernetes.io/projected/2df7ec48-f703-40db-9f31-f4f216de7935-kube-api-access-f574l\") pod \"2df7ec48-f703-40db-9f31-f4f216de7935\" (UID: \"2df7ec48-f703-40db-9f31-f4f216de7935\") " Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.176504 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2df7ec48-f703-40db-9f31-f4f216de7935-kube-api-access-f574l" (OuterVolumeSpecName: "kube-api-access-f574l") pod "2df7ec48-f703-40db-9f31-f4f216de7935" (UID: "2df7ec48-f703-40db-9f31-f4f216de7935"). InnerVolumeSpecName "kube-api-access-f574l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.197171 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2df7ec48-f703-40db-9f31-f4f216de7935-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2df7ec48-f703-40db-9f31-f4f216de7935" (UID: "2df7ec48-f703-40db-9f31-f4f216de7935"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.216045 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2df7ec48-f703-40db-9f31-f4f216de7935-config-data" (OuterVolumeSpecName: "config-data") pod "2df7ec48-f703-40db-9f31-f4f216de7935" (UID: "2df7ec48-f703-40db-9f31-f4f216de7935"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.272671 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2df7ec48-f703-40db-9f31-f4f216de7935-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.272707 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f574l\" (UniqueName: \"kubernetes.io/projected/2df7ec48-f703-40db-9f31-f4f216de7935-kube-api-access-f574l\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.272717 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2df7ec48-f703-40db-9f31-f4f216de7935-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.684900 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-q4h5g" event={"ID":"2df7ec48-f703-40db-9f31-f4f216de7935","Type":"ContainerDied","Data":"177eb935a27fa30020e476b3663f27205fe3a4601113ed07839b357a534b6e0f"} Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.684937 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="177eb935a27fa30020e476b3663f27205fe3a4601113ed07839b357a534b6e0f" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.684945 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-q4h5g" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.962269 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-rrxbj"] Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.962843 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" podUID="80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" containerName="dnsmasq-dns" containerID="cri-o://02de285e9c400284b60bc7418df05d21dc1f0fc9941356d1025d97c308bd66fa" gracePeriod=10 Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.984403 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-v9b86"] Jan 28 18:49:50 crc kubenswrapper[4767]: E0128 18:49:50.984933 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2df7ec48-f703-40db-9f31-f4f216de7935" containerName="keystone-db-sync" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.984959 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2df7ec48-f703-40db-9f31-f4f216de7935" containerName="keystone-db-sync" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.985190 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2df7ec48-f703-40db-9f31-f4f216de7935" containerName="keystone-db-sync" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.986383 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.988494 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-lb5dl" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.988680 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.990702 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.990962 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 28 18:49:50 crc kubenswrapper[4767]: I0128 18:49:50.991106 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.029524 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b868669f-4lq7c"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.031310 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.052890 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-v9b86"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.120293 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-4lq7c"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.140365 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-db-sync-5ptf6"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.141583 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-5ptf6" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.152165 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.152573 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-rvl9v" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.202820 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-5ptf6"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.236694 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qpt9q\" (UniqueName: \"kubernetes.io/projected/b113dd1e-6f00-4827-8945-48317056e181-kube-api-access-qpt9q\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.236758 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-combined-ca-bundle\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.236833 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-dns-svc\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.236965 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-credential-keys\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.236993 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.237053 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-scripts\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.237105 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.237239 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q488x\" 
(UniqueName: \"kubernetes.io/projected/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-kube-api-access-q488x\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.237268 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.237326 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-config-data\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.237444 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-fernet-keys\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.237479 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-config\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.318239 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-kdhmb"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.319552 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-kdhmb" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.330913 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-xl44z" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.331276 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.338033 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.338902 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-dns-svc\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.338986 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-credential-keys\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339016 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b956870a-eae0-48fb-8e4f-182a9f276308-config-data\") pod \"heat-db-sync-5ptf6\" (UID: \"b956870a-eae0-48fb-8e4f-182a9f276308\") " pod="openstack/heat-db-sync-5ptf6" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339039 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339063 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xq4mq\" (UniqueName: \"kubernetes.io/projected/b956870a-eae0-48fb-8e4f-182a9f276308-kube-api-access-xq4mq\") pod \"heat-db-sync-5ptf6\" (UID: \"b956870a-eae0-48fb-8e4f-182a9f276308\") " pod="openstack/heat-db-sync-5ptf6" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339102 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-scripts\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339136 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339223 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q488x\" (UniqueName: \"kubernetes.io/projected/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-kube-api-access-q488x\") pod \"dnsmasq-dns-5b868669f-4lq7c\" 
(UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339251 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339292 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b956870a-eae0-48fb-8e4f-182a9f276308-combined-ca-bundle\") pod \"heat-db-sync-5ptf6\" (UID: \"b956870a-eae0-48fb-8e4f-182a9f276308\") " pod="openstack/heat-db-sync-5ptf6" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339326 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-config-data\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339385 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-fernet-keys\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339428 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-config\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339467 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qpt9q\" (UniqueName: \"kubernetes.io/projected/b113dd1e-6f00-4827-8945-48317056e181-kube-api-access-qpt9q\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.339489 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-combined-ca-bundle\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.348142 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-ovsdbserver-nb\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.348723 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-ovsdbserver-sb\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc 
kubenswrapper[4767]: I0128 18:49:51.353558 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-dns-svc\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.357510 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-dns-swift-storage-0\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.357660 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-config\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.359571 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-vm9r4"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.360703 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.361110 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-scripts\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.362155 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-credential-keys\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.367256 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-config-data\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.372774 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-fernet-keys\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.373741 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-combined-ca-bundle\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.378685 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.380673 4767 reflector.go:368] Caches populated for *v1.Secret from 
object-"openstack"/"cinder-config-data" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.387527 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-xhhd2" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.390706 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q488x\" (UniqueName: \"kubernetes.io/projected/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-kube-api-access-q488x\") pod \"dnsmasq-dns-5b868669f-4lq7c\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") " pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.406373 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-kdhmb"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.413343 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-4lq7c" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.415870 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qpt9q\" (UniqueName: \"kubernetes.io/projected/b113dd1e-6f00-4827-8945-48317056e181-kube-api-access-qpt9q\") pod \"keystone-bootstrap-v9b86\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") " pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.451296 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.453358 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.457803 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.458397 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.458743 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cbd7dd91-c84a-442f-86af-c3a06ca9a373-config\") pod \"neutron-db-sync-kdhmb\" (UID: \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\") " pod="openstack/neutron-db-sync-kdhmb" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.458826 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.458893 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b956870a-eae0-48fb-8e4f-182a9f276308-combined-ca-bundle\") pod \"heat-db-sync-5ptf6\" (UID: \"b956870a-eae0-48fb-8e4f-182a9f276308\") " pod="openstack/heat-db-sync-5ptf6" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.458973 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-scripts\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.459043 4767 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-config-data\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.459064 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65xzt\" (UniqueName: \"kubernetes.io/projected/28fde299-6f90-4c23-8a4f-15823bd8f4c5-kube-api-access-65xzt\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.459088 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lk2wr\" (UniqueName: \"kubernetes.io/projected/cbd7dd91-c84a-442f-86af-c3a06ca9a373-kube-api-access-lk2wr\") pod \"neutron-db-sync-kdhmb\" (UID: \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\") " pod="openstack/neutron-db-sync-kdhmb" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.459139 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbd7dd91-c84a-442f-86af-c3a06ca9a373-combined-ca-bundle\") pod \"neutron-db-sync-kdhmb\" (UID: \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\") " pod="openstack/neutron-db-sync-kdhmb" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.461889 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.461937 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/28fde299-6f90-4c23-8a4f-15823bd8f4c5-etc-machine-id\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.462015 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-scripts\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.462105 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5d5a120-7ef9-431a-88a4-915986881b2d-log-httpd\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.462193 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5d5a120-7ef9-431a-88a4-915986881b2d-run-httpd\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.462314 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-config-data\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.464263 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b956870a-eae0-48fb-8e4f-182a9f276308-config-data\") pod \"heat-db-sync-5ptf6\" (UID: \"b956870a-eae0-48fb-8e4f-182a9f276308\") " pod="openstack/heat-db-sync-5ptf6" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.464363 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xq4mq\" (UniqueName: \"kubernetes.io/projected/b956870a-eae0-48fb-8e4f-182a9f276308-kube-api-access-xq4mq\") pod \"heat-db-sync-5ptf6\" (UID: \"b956870a-eae0-48fb-8e4f-182a9f276308\") " pod="openstack/heat-db-sync-5ptf6" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.465394 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xnc2\" (UniqueName: \"kubernetes.io/projected/a5d5a120-7ef9-431a-88a4-915986881b2d-kube-api-access-9xnc2\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.465539 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-db-sync-config-data\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.465606 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-combined-ca-bundle\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.477430 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b956870a-eae0-48fb-8e4f-182a9f276308-combined-ca-bundle\") pod \"heat-db-sync-5ptf6\" (UID: \"b956870a-eae0-48fb-8e4f-182a9f276308\") " pod="openstack/heat-db-sync-5ptf6" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.481277 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-vm9r4"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.487820 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b956870a-eae0-48fb-8e4f-182a9f276308-config-data\") pod \"heat-db-sync-5ptf6\" (UID: \"b956870a-eae0-48fb-8e4f-182a9f276308\") " pod="openstack/heat-db-sync-5ptf6" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.511147 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xq4mq\" (UniqueName: \"kubernetes.io/projected/b956870a-eae0-48fb-8e4f-182a9f276308-kube-api-access-xq4mq\") pod \"heat-db-sync-5ptf6\" (UID: \"b956870a-eae0-48fb-8e4f-182a9f276308\") " pod="openstack/heat-db-sync-5ptf6" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.519985 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-5ptf6" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.547896 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573436 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573499 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-scripts\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573522 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-config-data\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573538 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65xzt\" (UniqueName: \"kubernetes.io/projected/28fde299-6f90-4c23-8a4f-15823bd8f4c5-kube-api-access-65xzt\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573557 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lk2wr\" (UniqueName: \"kubernetes.io/projected/cbd7dd91-c84a-442f-86af-c3a06ca9a373-kube-api-access-lk2wr\") pod \"neutron-db-sync-kdhmb\" (UID: \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\") " pod="openstack/neutron-db-sync-kdhmb" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573571 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbd7dd91-c84a-442f-86af-c3a06ca9a373-combined-ca-bundle\") pod \"neutron-db-sync-kdhmb\" (UID: \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\") " pod="openstack/neutron-db-sync-kdhmb" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573596 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573617 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/28fde299-6f90-4c23-8a4f-15823bd8f4c5-etc-machine-id\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573645 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-scripts\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc 
kubenswrapper[4767]: I0128 18:49:51.573666 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5d5a120-7ef9-431a-88a4-915986881b2d-log-httpd\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573696 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5d5a120-7ef9-431a-88a4-915986881b2d-run-httpd\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573720 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-config-data\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573749 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xnc2\" (UniqueName: \"kubernetes.io/projected/a5d5a120-7ef9-431a-88a4-915986881b2d-kube-api-access-9xnc2\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573770 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-db-sync-config-data\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573794 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-combined-ca-bundle\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.573833 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cbd7dd91-c84a-442f-86af-c3a06ca9a373-config\") pod \"neutron-db-sync-kdhmb\" (UID: \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\") " pod="openstack/neutron-db-sync-kdhmb" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.574471 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/28fde299-6f90-4c23-8a4f-15823bd8f4c5-etc-machine-id\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.583612 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.595409 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-scripts\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " 
pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.595984 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5d5a120-7ef9-431a-88a4-915986881b2d-log-httpd\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.596325 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5d5a120-7ef9-431a-88a4-915986881b2d-run-httpd\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.598401 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbd7dd91-c84a-442f-86af-c3a06ca9a373-combined-ca-bundle\") pod \"neutron-db-sync-kdhmb\" (UID: \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\") " pod="openstack/neutron-db-sync-kdhmb" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.598962 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-scripts\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.603650 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-config-data\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.604195 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/cbd7dd91-c84a-442f-86af-c3a06ca9a373-config\") pod \"neutron-db-sync-kdhmb\" (UID: \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\") " pod="openstack/neutron-db-sync-kdhmb" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.606243 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-config-data\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.625814 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-db-sync-config-data\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.634903 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-v9b86" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.635068 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-combined-ca-bundle\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.635360 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.673747 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lk2wr\" (UniqueName: \"kubernetes.io/projected/cbd7dd91-c84a-442f-86af-c3a06ca9a373-kube-api-access-lk2wr\") pod \"neutron-db-sync-kdhmb\" (UID: \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\") " pod="openstack/neutron-db-sync-kdhmb" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.678246 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xnc2\" (UniqueName: \"kubernetes.io/projected/a5d5a120-7ef9-431a-88a4-915986881b2d-kube-api-access-9xnc2\") pod \"ceilometer-0\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " pod="openstack/ceilometer-0" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.699656 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65xzt\" (UniqueName: \"kubernetes.io/projected/28fde299-6f90-4c23-8a4f-15823bd8f4c5-kube-api-access-65xzt\") pod \"cinder-db-sync-vm9r4\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.743497 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-4lq7c"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.754151 4767 generic.go:334] "Generic (PLEG): container finished" podID="80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" containerID="02de285e9c400284b60bc7418df05d21dc1f0fc9941356d1025d97c308bd66fa" exitCode=0 Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.754197 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" event={"ID":"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a","Type":"ContainerDied","Data":"02de285e9c400284b60bc7418df05d21dc1f0fc9941356d1025d97c308bd66fa"} Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.759698 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-5nj7s"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.761171 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.764719 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-6jzm4" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.764807 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.764877 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.773730 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-5nj7s"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.799934 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-combined-ca-bundle\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.799990 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/008911ac-269d-47a4-a624-0e789415d794-logs\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.800034 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-config-data\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.800102 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdjlw\" (UniqueName: \"kubernetes.io/projected/008911ac-269d-47a4-a624-0e789415d794-kube-api-access-pdjlw\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.800133 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-scripts\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.823876 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-8bggl"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.825145 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-8bggl" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.843643 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.847939 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-b5nn8" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.870270 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-hxs9q"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.871720 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.901513 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-8bggl"] Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902396 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-scripts\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902436 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-combined-ca-bundle\") pod \"barbican-db-sync-8bggl\" (UID: \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\") " pod="openstack/barbican-db-sync-8bggl" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902462 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902486 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-db-sync-config-data\") pod \"barbican-db-sync-8bggl\" (UID: \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\") " pod="openstack/barbican-db-sync-8bggl" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902518 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-config\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902535 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902556 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-dns-svc\") pod 
\"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902591 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902616 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-combined-ca-bundle\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902637 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zn6n\" (UniqueName: \"kubernetes.io/projected/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-kube-api-access-7zn6n\") pod \"barbican-db-sync-8bggl\" (UID: \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\") " pod="openstack/barbican-db-sync-8bggl" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902659 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrff6\" (UniqueName: \"kubernetes.io/projected/e0c1967a-8d14-4e18-8049-640fccb74a19-kube-api-access-zrff6\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902690 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/008911ac-269d-47a4-a624-0e789415d794-logs\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902723 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-config-data\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.902782 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdjlw\" (UniqueName: \"kubernetes.io/projected/008911ac-269d-47a4-a624-0e789415d794-kube-api-access-pdjlw\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.904437 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/008911ac-269d-47a4-a624-0e789415d794-logs\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.908875 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-combined-ca-bundle\") pod \"placement-db-sync-5nj7s\" (UID: 
\"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.913601 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-scripts\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.919257 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-config-data\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.933070 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdjlw\" (UniqueName: \"kubernetes.io/projected/008911ac-269d-47a4-a624-0e789415d794-kube-api-access-pdjlw\") pod \"placement-db-sync-5nj7s\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:51 crc kubenswrapper[4767]: I0128 18:49:51.938824 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-hxs9q"] Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.005468 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-combined-ca-bundle\") pod \"barbican-db-sync-8bggl\" (UID: \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\") " pod="openstack/barbican-db-sync-8bggl" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.005826 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.005850 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-db-sync-config-data\") pod \"barbican-db-sync-8bggl\" (UID: \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\") " pod="openstack/barbican-db-sync-8bggl" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.005900 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-config\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.005924 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.005950 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-dns-svc\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: 
\"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.005997 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.006065 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zn6n\" (UniqueName: \"kubernetes.io/projected/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-kube-api-access-7zn6n\") pod \"barbican-db-sync-8bggl\" (UID: \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\") " pod="openstack/barbican-db-sync-8bggl" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.006093 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrff6\" (UniqueName: \"kubernetes.io/projected/e0c1967a-8d14-4e18-8049-640fccb74a19-kube-api-access-zrff6\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.009146 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-dns-svc\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.010037 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-ovsdbserver-sb\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.022343 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-combined-ca-bundle\") pod \"barbican-db-sync-8bggl\" (UID: \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\") " pod="openstack/barbican-db-sync-8bggl" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.022575 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-dns-swift-storage-0\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.025424 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-config\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.025486 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-db-sync-config-data\") pod \"barbican-db-sync-8bggl\" (UID: \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\") " pod="openstack/barbican-db-sync-8bggl" Jan 28 18:49:52 crc 
kubenswrapper[4767]: I0128 18:49:52.029726 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-ovsdbserver-nb\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.034675 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zn6n\" (UniqueName: \"kubernetes.io/projected/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-kube-api-access-7zn6n\") pod \"barbican-db-sync-8bggl\" (UID: \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\") " pod="openstack/barbican-db-sync-8bggl" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.039494 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrff6\" (UniqueName: \"kubernetes.io/projected/e0c1967a-8d14-4e18-8049-640fccb74a19-kube-api-access-zrff6\") pod \"dnsmasq-dns-cf78879c9-hxs9q\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.108728 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-kdhmb" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.139325 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-5nj7s" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.283718 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.299046 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.308579 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.344675 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-8bggl" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.498489 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.633874 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6zglp\" (UniqueName: \"kubernetes.io/projected/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-kube-api-access-6zglp\") pod \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.633966 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-dns-swift-storage-0\") pod \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.634051 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-config\") pod \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.634249 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-ovsdbserver-sb\") pod \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.634333 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-dns-svc\") pod \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.634404 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-ovsdbserver-nb\") pod \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\" (UID: \"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a\") " Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.668708 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-kube-api-access-6zglp" (OuterVolumeSpecName: "kube-api-access-6zglp") pod "80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" (UID: "80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a"). InnerVolumeSpecName "kube-api-access-6zglp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.681193 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-v9b86"] Jan 28 18:49:52 crc kubenswrapper[4767]: W0128 18:49:52.685040 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb956870a_eae0_48fb_8e4f_182a9f276308.slice/crio-7814196a4da2add88b3faa267244a54e4877e24ac10f357f4ec3a1864e0f8e3e WatchSource:0}: Error finding container 7814196a4da2add88b3faa267244a54e4877e24ac10f357f4ec3a1864e0f8e3e: Status 404 returned error can't find the container with id 7814196a4da2add88b3faa267244a54e4877e24ac10f357f4ec3a1864e0f8e3e Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.714948 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-db-sync-5ptf6"] Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.737644 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6zglp\" (UniqueName: \"kubernetes.io/projected/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-kube-api-access-6zglp\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.745739 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" (UID: "80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.755171 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-config" (OuterVolumeSpecName: "config") pod "80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" (UID: "80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.761496 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-4lq7c"] Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.785592 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-5nj7s"] Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.787928 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-5ptf6" event={"ID":"b956870a-eae0-48fb-8e4f-182a9f276308","Type":"ContainerStarted","Data":"7814196a4da2add88b3faa267244a54e4877e24ac10f357f4ec3a1864e0f8e3e"} Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.789324 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-4lq7c" event={"ID":"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09","Type":"ContainerStarted","Data":"108d0e848930226d3cd191fb7449ecf1a74b5714fcd276ac81c92832da33d1d9"} Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.796392 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" (UID: "80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.798074 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.798842 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" (UID: "80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.799357 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" (UID: "80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.811962 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c79d794d7-rrxbj" event={"ID":"80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a","Type":"ContainerDied","Data":"3fd234d1799084fe1ec74ed51f5279c4f330b4dc60cea6b7b2ffbd3a66452cad"} Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.812008 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-kdhmb"] Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.812025 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-v9b86" event={"ID":"b113dd1e-6f00-4827-8945-48317056e181","Type":"ContainerStarted","Data":"ba01e069b22b38d871777544b61a3f3ce2041399c54bd8e38e7c6d73460edd6e"} Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.812042 4767 scope.go:117] "RemoveContainer" containerID="02de285e9c400284b60bc7418df05d21dc1f0fc9941356d1025d97c308bd66fa" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.839675 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.839914 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.839928 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.839941 4767 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.839956 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:49:52 crc kubenswrapper[4767]: W0128 18:49:52.930201 4767 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod008911ac_269d_47a4_a624_0e789415d794.slice/crio-986a1bd0bbfd7170982e95e22bbddcc7c7e5ba5b5f8ec96c86630d514904cf75 WatchSource:0}: Error finding container 986a1bd0bbfd7170982e95e22bbddcc7c7e5ba5b5f8ec96c86630d514904cf75: Status 404 returned error can't find the container with id 986a1bd0bbfd7170982e95e22bbddcc7c7e5ba5b5f8ec96c86630d514904cf75
Jan 28 18:49:52 crc kubenswrapper[4767]: I0128 18:49:52.949738 4767 scope.go:117] "RemoveContainer" containerID="2da04d2dcae9390a0ba140da6f551e28fb14a6a8b3126c8be0da10bb10d0fa53"
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.067672 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-8bggl"]
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.119647 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.130285 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-hxs9q"]
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.136190 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.149300 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-rrxbj"]
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.156565 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c79d794d7-rrxbj"]
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.263300 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-vm9r4"]
Jan 28 18:49:53 crc kubenswrapper[4767]: W0128 18:49:53.287271 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28fde299_6f90_4c23_8a4f_15823bd8f4c5.slice/crio-92597bae4fad0bf39706b62f0e95ae03752a0a75c02e2be437a4c1b499dcf395 WatchSource:0}: Error finding container 92597bae4fad0bf39706b62f0e95ae03752a0a75c02e2be437a4c1b499dcf395: Status 404 returned error can't find the container with id 92597bae4fad0bf39706b62f0e95ae03752a0a75c02e2be437a4c1b499dcf395
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.815126 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-kdhmb" event={"ID":"cbd7dd91-c84a-442f-86af-c3a06ca9a373","Type":"ContainerStarted","Data":"d46fa9f61d3a844beeae98cfaa9a563690b5a6876b170d769db6eec982b81858"}
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.815937 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-kdhmb" event={"ID":"cbd7dd91-c84a-442f-86af-c3a06ca9a373","Type":"ContainerStarted","Data":"e4dd237191b1774a347be9e1d72e23ba7500a507a2ade51cc7444e7c430536f9"}
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.816642 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-8bggl" event={"ID":"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5","Type":"ContainerStarted","Data":"6c2a880bc50a5dfb99aa46d3454600c415046bfef6bd8fba3a5c371c95e4483c"}
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.819705 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-5nj7s" event={"ID":"008911ac-269d-47a4-a624-0e789415d794","Type":"ContainerStarted","Data":"986a1bd0bbfd7170982e95e22bbddcc7c7e5ba5b5f8ec96c86630d514904cf75"}
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.824640 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-vm9r4" event={"ID":"28fde299-6f90-4c23-8a4f-15823bd8f4c5","Type":"ContainerStarted","Data":"92597bae4fad0bf39706b62f0e95ae03752a0a75c02e2be437a4c1b499dcf395"}
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.828457 4767 generic.go:334] "Generic (PLEG): container finished" podID="e0c1967a-8d14-4e18-8049-640fccb74a19" containerID="49a970cadcc270e3e35cf9086f9ee13fa8b2b9aff884ff6121445410c5312cfd" exitCode=0
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.828551 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" event={"ID":"e0c1967a-8d14-4e18-8049-640fccb74a19","Type":"ContainerDied","Data":"49a970cadcc270e3e35cf9086f9ee13fa8b2b9aff884ff6121445410c5312cfd"}
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.828580 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" event={"ID":"e0c1967a-8d14-4e18-8049-640fccb74a19","Type":"ContainerStarted","Data":"1e2c48c2cffaa4df152cdf697cfaf52cce82de11854e7c32939dd97ea4f0ed2d"}
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.832560 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5d5a120-7ef9-431a-88a4-915986881b2d","Type":"ContainerStarted","Data":"4a71f18e24e036be3bd6de9874d976e7f2597700c3f9e3303a6750367e1a2ec4"}
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.845867 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-v9b86" event={"ID":"b113dd1e-6f00-4827-8945-48317056e181","Type":"ContainerStarted","Data":"759efdc292c13b8f3443ede99edb215a1788a2aa61e0956e4bb9246f96f555b8"}
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.848229 4767 generic.go:334] "Generic (PLEG): container finished" podID="bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09" containerID="e4081ae17437af372ed9d29422afb5e77b6f332bad963bbe92b46983b3cee691" exitCode=0
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.848270 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-4lq7c" event={"ID":"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09","Type":"ContainerDied","Data":"e4081ae17437af372ed9d29422afb5e77b6f332bad963bbe92b46983b3cee691"}
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.870401 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-kdhmb" podStartSLOduration=2.870380375 podStartE2EDuration="2.870380375s" podCreationTimestamp="2026-01-28 18:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:49:53.832375419 +0000 UTC m=+1199.796558283" watchObservedRunningTime="2026-01-28 18:49:53.870380375 +0000 UTC m=+1199.834563249"
Jan 28 18:49:53 crc kubenswrapper[4767]: I0128 18:49:53.899238 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-v9b86" podStartSLOduration=3.899194755 podStartE2EDuration="3.899194755s" podCreationTimestamp="2026-01-28 18:49:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:49:53.883031841 +0000 UTC m=+1199.847214725" watchObservedRunningTime="2026-01-28 18:49:53.899194755 +0000 UTC m=+1199.863377629"
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.426996 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-4lq7c"
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.490916 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-ovsdbserver-nb\") pod \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") "
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.491003 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-dns-svc\") pod \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") "
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.491087 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-ovsdbserver-sb\") pod \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") "
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.491122 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-config\") pod \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") "
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.491194 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q488x\" (UniqueName: \"kubernetes.io/projected/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-kube-api-access-q488x\") pod \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") "
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.491374 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-dns-swift-storage-0\") pod \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\" (UID: \"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09\") "
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.501562 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-kube-api-access-q488x" (OuterVolumeSpecName: "kube-api-access-q488x") pod "bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09" (UID: "bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09"). InnerVolumeSpecName "kube-api-access-q488x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.529368 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09" (UID: "bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.532539 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09" (UID: "bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.534758 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-config" (OuterVolumeSpecName: "config") pod "bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09" (UID: "bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.552556 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09" (UID: "bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.557988 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09" (UID: "bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.602833 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.602889 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.602900 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.602912 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-config\") on node \"crc\" DevicePath \"\""
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.602922 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q488x\" (UniqueName: \"kubernetes.io/projected/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-kube-api-access-q488x\") on node \"crc\" DevicePath \"\""
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.602939 4767 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.818035 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" path="/var/lib/kubelet/pods/80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a/volumes"
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.885967 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b868669f-4lq7c" event={"ID":"bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09","Type":"ContainerDied","Data":"108d0e848930226d3cd191fb7449ecf1a74b5714fcd276ac81c92832da33d1d9"}
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.886037 4767 scope.go:117] "RemoveContainer" containerID="e4081ae17437af372ed9d29422afb5e77b6f332bad963bbe92b46983b3cee691"
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.886241 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b868669f-4lq7c"
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.900371 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" event={"ID":"e0c1967a-8d14-4e18-8049-640fccb74a19","Type":"ContainerStarted","Data":"7ebb7015226cabcd6b9a74839917938386cc5f994638fbfec33a5c009f025b55"}
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.900420 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q"
Jan 28 18:49:54 crc kubenswrapper[4767]: I0128 18:49:54.985558 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-4lq7c"]
Jan 28 18:49:55 crc kubenswrapper[4767]: I0128 18:49:54.995107 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b868669f-4lq7c"]
Jan 28 18:49:55 crc kubenswrapper[4767]: I0128 18:49:55.011650 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" podStartSLOduration=4.011607165 podStartE2EDuration="4.011607165s" podCreationTimestamp="2026-01-28 18:49:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:49:55.0005876 +0000 UTC m=+1200.964770494" watchObservedRunningTime="2026-01-28 18:49:55.011607165 +0000 UTC m=+1200.975790039"
Jan 28 18:49:56 crc kubenswrapper[4767]: I0128 18:49:56.810461 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09" path="/var/lib/kubelet/pods/bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09/volumes"
Jan 28 18:50:02 crc kubenswrapper[4767]: I0128 18:50:02.321709 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q"
Jan 28 18:50:02 crc kubenswrapper[4767]: I0128 18:50:02.398331 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-tbfrc"]
Jan 28 18:50:02 crc kubenswrapper[4767]: I0128 18:50:02.398617 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" podUID="542e241a-df61-4376-8a81-8b82452979d0" containerName="dnsmasq-dns" containerID="cri-o://bdd293df75c1a9231891f2ec61ba825228b2169f3e828744b0e0f6d529ebca2d" gracePeriod=10
Jan 28 18:50:03 crc kubenswrapper[4767]: I0128 18:50:03.596716 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" podUID="542e241a-df61-4376-8a81-8b82452979d0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: connect: connection refused"
Jan 28 18:50:06 crc kubenswrapper[4767]: I0128 18:50:06.009450 4767 generic.go:334] "Generic (PLEG): container finished" podID="542e241a-df61-4376-8a81-8b82452979d0" containerID="bdd293df75c1a9231891f2ec61ba825228b2169f3e828744b0e0f6d529ebca2d" exitCode=0
Jan 28 18:50:06 crc kubenswrapper[4767]: I0128 18:50:06.009528 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" event={"ID":"542e241a-df61-4376-8a81-8b82452979d0","Type":"ContainerDied","Data":"bdd293df75c1a9231891f2ec61ba825228b2169f3e828744b0e0f6d529ebca2d"}
Jan 28 18:50:07 crc kubenswrapper[4767]: E0128 18:50:07.848424 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified"
Jan 28 18:50:07 crc kubenswrapper[4767]: E0128 18:50:07.849232 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pdjlw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-5nj7s_openstack(008911ac-269d-47a4-a624-0e789415d794): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 18:50:07 crc kubenswrapper[4767]: E0128 18:50:07.850815 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-5nj7s" podUID="008911ac-269d-47a4-a624-0e789415d794"
Jan 28 18:50:08 crc kubenswrapper[4767]: E0128 18:50:08.026675 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-5nj7s" podUID="008911ac-269d-47a4-a624-0e789415d794"
podUID="542e241a-df61-4376-8a81-8b82452979d0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout" Jan 28 18:50:25 crc kubenswrapper[4767]: E0128 18:50:25.163635 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Jan 28 18:50:25 crc kubenswrapper[4767]: E0128 18:50:25.164396 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-65xzt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-vm9r4_openstack(28fde299-6f90-4c23-8a4f-15823bd8f4c5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:50:25 crc kubenswrapper[4767]: E0128 18:50:25.165981 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-vm9r4" podUID="28fde299-6f90-4c23-8a4f-15823bd8f4c5" Jan 28 18:50:25 crc kubenswrapper[4767]: I0128 18:50:25.249928 4767 generic.go:334] "Generic 
(PLEG): container finished" podID="b113dd1e-6f00-4827-8945-48317056e181" containerID="759efdc292c13b8f3443ede99edb215a1788a2aa61e0956e4bb9246f96f555b8" exitCode=0 Jan 28 18:50:25 crc kubenswrapper[4767]: I0128 18:50:25.250025 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-v9b86" event={"ID":"b113dd1e-6f00-4827-8945-48317056e181","Type":"ContainerDied","Data":"759efdc292c13b8f3443ede99edb215a1788a2aa61e0956e4bb9246f96f555b8"} Jan 28 18:50:25 crc kubenswrapper[4767]: E0128 18:50:25.253489 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-vm9r4" podUID="28fde299-6f90-4c23-8a4f-15823bd8f4c5" Jan 28 18:50:27 crc kubenswrapper[4767]: I0128 18:50:27.272976 4767 generic.go:334] "Generic (PLEG): container finished" podID="b867495c-e01f-46a2-aa93-e42cd53d1b64" containerID="e7aab6561aff3b78b34ec76883e3527deafa9656c91e2a33fc479da3798f2fd0" exitCode=0 Jan 28 18:50:27 crc kubenswrapper[4767]: I0128 18:50:27.273028 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-q46gj" event={"ID":"b867495c-e01f-46a2-aa93-e42cd53d1b64","Type":"ContainerDied","Data":"e7aab6561aff3b78b34ec76883e3527deafa9656c91e2a33fc479da3798f2fd0"} Jan 28 18:50:28 crc kubenswrapper[4767]: I0128 18:50:28.600456 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" podUID="542e241a-df61-4376-8a81-8b82452979d0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout" Jan 28 18:50:33 crc kubenswrapper[4767]: I0128 18:50:33.601670 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" podUID="542e241a-df61-4376-8a81-8b82452979d0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout" Jan 28 18:50:34 crc kubenswrapper[4767]: I0128 18:50:34.998803 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.111686 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z2kv5\" (UniqueName: \"kubernetes.io/projected/542e241a-df61-4376-8a81-8b82452979d0-kube-api-access-z2kv5\") pod \"542e241a-df61-4376-8a81-8b82452979d0\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.112308 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-dns-svc\") pod \"542e241a-df61-4376-8a81-8b82452979d0\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.112440 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-config\") pod \"542e241a-df61-4376-8a81-8b82452979d0\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.112576 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-ovsdbserver-nb\") pod \"542e241a-df61-4376-8a81-8b82452979d0\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.112722 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-ovsdbserver-sb\") pod \"542e241a-df61-4376-8a81-8b82452979d0\" (UID: \"542e241a-df61-4376-8a81-8b82452979d0\") " Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.119145 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/542e241a-df61-4376-8a81-8b82452979d0-kube-api-access-z2kv5" (OuterVolumeSpecName: "kube-api-access-z2kv5") pod "542e241a-df61-4376-8a81-8b82452979d0" (UID: "542e241a-df61-4376-8a81-8b82452979d0"). InnerVolumeSpecName "kube-api-access-z2kv5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.165943 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-config" (OuterVolumeSpecName: "config") pod "542e241a-df61-4376-8a81-8b82452979d0" (UID: "542e241a-df61-4376-8a81-8b82452979d0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.171049 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "542e241a-df61-4376-8a81-8b82452979d0" (UID: "542e241a-df61-4376-8a81-8b82452979d0"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.171768 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "542e241a-df61-4376-8a81-8b82452979d0" (UID: "542e241a-df61-4376-8a81-8b82452979d0"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.178900 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "542e241a-df61-4376-8a81-8b82452979d0" (UID: "542e241a-df61-4376-8a81-8b82452979d0"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.215001 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z2kv5\" (UniqueName: \"kubernetes.io/projected/542e241a-df61-4376-8a81-8b82452979d0-kube-api-access-z2kv5\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.215050 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.215063 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.215076 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.215091 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/542e241a-df61-4376-8a81-8b82452979d0-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.359359 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" event={"ID":"542e241a-df61-4376-8a81-8b82452979d0","Type":"ContainerDied","Data":"ef6a6f76f119a587c119bf42dcb3bd954d940925c68701bfdc82681e2c802e16"} Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.359539 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.414633 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-tbfrc"] Jan 28 18:50:35 crc kubenswrapper[4767]: I0128 18:50:35.422086 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-b8fbc5445-tbfrc"] Jan 28 18:50:36 crc kubenswrapper[4767]: E0128 18:50:36.052151 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Jan 28 18:50:36 crc kubenswrapper[4767]: E0128 18:50:36.052429 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pdjlw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-5nj7s_openstack(008911ac-269d-47a4-a624-0e789415d794): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:50:36 crc kubenswrapper[4767]: E0128 18:50:36.054310 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-5nj7s" podUID="008911ac-269d-47a4-a624-0e789415d794" Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.147103 4767 util.go:48] "No ready sandbox for pod can be 
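(annotation, not kubelet output: the repeated prober.go entries above are kubelet's TCP readiness probe dialing the dnsmasq pod IP on port 5353; after the container was killed at 18:50:02 the dial can only fail until the pod object is finally deleted at 18:50:35. Since the probe is a plain TCP connect, the same check can be reproduced by hand, assuming shell access to the node and that the pod IP from the log is still routed:
    nc -zv -w 5 10.217.0.116 5353    # same TCP connect the kubelet prober performs
)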
Jan 28 18:50:36 crc kubenswrapper[4767]: E0128 18:50:36.052151 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified"
Jan 28 18:50:36 crc kubenswrapper[4767]: E0128 18:50:36.052429 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pdjlw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-5nj7s_openstack(008911ac-269d-47a4-a624-0e789415d794): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 18:50:36 crc kubenswrapper[4767]: E0128 18:50:36.054310 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-5nj7s" podUID="008911ac-269d-47a4-a624-0e789415d794"
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.147103 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-v9b86"
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.153199 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-q46gj"
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.332455 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-config-data\") pod \"b867495c-e01f-46a2-aa93-e42cd53d1b64\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") "
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.332800 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-combined-ca-bundle\") pod \"b867495c-e01f-46a2-aa93-e42cd53d1b64\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") "
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.332858 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-credential-keys\") pod \"b113dd1e-6f00-4827-8945-48317056e181\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") "
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.332910 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qpt9q\" (UniqueName: \"kubernetes.io/projected/b113dd1e-6f00-4827-8945-48317056e181-kube-api-access-qpt9q\") pod \"b113dd1e-6f00-4827-8945-48317056e181\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") "
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.332950 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-db-sync-config-data\") pod \"b867495c-e01f-46a2-aa93-e42cd53d1b64\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") "
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.332966 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-combined-ca-bundle\") pod \"b113dd1e-6f00-4827-8945-48317056e181\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") "
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.333029 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-fernet-keys\") pod \"b113dd1e-6f00-4827-8945-48317056e181\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") "
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.333058 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pnbfn\" (UniqueName: \"kubernetes.io/projected/b867495c-e01f-46a2-aa93-e42cd53d1b64-kube-api-access-pnbfn\") pod \"b867495c-e01f-46a2-aa93-e42cd53d1b64\" (UID: \"b867495c-e01f-46a2-aa93-e42cd53d1b64\") "
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.333091 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-config-data\") pod \"b113dd1e-6f00-4827-8945-48317056e181\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") "
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.333139 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-scripts\") pod \"b113dd1e-6f00-4827-8945-48317056e181\" (UID: \"b113dd1e-6f00-4827-8945-48317056e181\") "
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.339348 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b113dd1e-6f00-4827-8945-48317056e181" (UID: "b113dd1e-6f00-4827-8945-48317056e181"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.340109 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-scripts" (OuterVolumeSpecName: "scripts") pod "b113dd1e-6f00-4827-8945-48317056e181" (UID: "b113dd1e-6f00-4827-8945-48317056e181"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.340836 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b113dd1e-6f00-4827-8945-48317056e181-kube-api-access-qpt9q" (OuterVolumeSpecName: "kube-api-access-qpt9q") pod "b113dd1e-6f00-4827-8945-48317056e181" (UID: "b113dd1e-6f00-4827-8945-48317056e181"). InnerVolumeSpecName "kube-api-access-qpt9q". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.342044 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b113dd1e-6f00-4827-8945-48317056e181" (UID: "b113dd1e-6f00-4827-8945-48317056e181"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.346115 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b867495c-e01f-46a2-aa93-e42cd53d1b64-kube-api-access-pnbfn" (OuterVolumeSpecName: "kube-api-access-pnbfn") pod "b867495c-e01f-46a2-aa93-e42cd53d1b64" (UID: "b867495c-e01f-46a2-aa93-e42cd53d1b64"). InnerVolumeSpecName "kube-api-access-pnbfn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.355591 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b867495c-e01f-46a2-aa93-e42cd53d1b64" (UID: "b867495c-e01f-46a2-aa93-e42cd53d1b64"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.368589 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b113dd1e-6f00-4827-8945-48317056e181" (UID: "b113dd1e-6f00-4827-8945-48317056e181"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.373617 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-q46gj" event={"ID":"b867495c-e01f-46a2-aa93-e42cd53d1b64","Type":"ContainerDied","Data":"7b893dc97f2dfb8acff342e64aea2c92a96882afe8b306260d5eca86f8200be7"}
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.373663 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b893dc97f2dfb8acff342e64aea2c92a96882afe8b306260d5eca86f8200be7"
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.373732 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-q46gj"
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.375285 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-config-data" (OuterVolumeSpecName: "config-data") pod "b113dd1e-6f00-4827-8945-48317056e181" (UID: "b113dd1e-6f00-4827-8945-48317056e181"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.376778 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-v9b86" event={"ID":"b113dd1e-6f00-4827-8945-48317056e181","Type":"ContainerDied","Data":"ba01e069b22b38d871777544b61a3f3ce2041399c54bd8e38e7c6d73460edd6e"}
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.376806 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba01e069b22b38d871777544b61a3f3ce2041399c54bd8e38e7c6d73460edd6e"
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.376955 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-v9b86"
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.380891 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b867495c-e01f-46a2-aa93-e42cd53d1b64" (UID: "b867495c-e01f-46a2-aa93-e42cd53d1b64"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.389848 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-config-data" (OuterVolumeSpecName: "config-data") pod "b867495c-e01f-46a2-aa93-e42cd53d1b64" (UID: "b867495c-e01f-46a2-aa93-e42cd53d1b64"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.435918 4767 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.436060 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.436121 4767 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-fernet-keys\") on node \"crc\" DevicePath \"\""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.436181 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pnbfn\" (UniqueName: \"kubernetes.io/projected/b867495c-e01f-46a2-aa93-e42cd53d1b64-kube-api-access-pnbfn\") on node \"crc\" DevicePath \"\""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.436268 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.436337 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.436396 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.436452 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b867495c-e01f-46a2-aa93-e42cd53d1b64-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.436503 4767 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b113dd1e-6f00-4827-8945-48317056e181-credential-keys\") on node \"crc\" DevicePath \"\""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.436575 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qpt9q\" (UniqueName: \"kubernetes.io/projected/b113dd1e-6f00-4827-8945-48317056e181-kube-api-access-qpt9q\") on node \"crc\" DevicePath \"\""
Jan 28 18:50:36 crc kubenswrapper[4767]: I0128 18:50:36.806658 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="542e241a-df61-4376-8a81-8b82452979d0" path="/var/lib/kubelet/pods/542e241a-df61-4376-8a81-8b82452979d0/volumes"
Jan 28 18:50:37 crc kubenswrapper[4767]: E0128 18:50:37.250511 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified"
Jan 28 18:50:37 crc kubenswrapper[4767]: E0128 18:50:37.251656 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5f9h554h9dh688h99hc5h687h5c9hb4h566h5f9h67fhb8h58bh5c7hb9hd5h8ch575h688h645h64dh77h558h5d5hf5h676h56ch5b6h5c5h675hc7q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-9xnc2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(a5d5a120-7ef9-431a-88a4-915986881b2d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.251926 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-v9b86"]
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.260110 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-v9b86"]
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.350771 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-2v2sq"]
Jan 28 18:50:37 crc kubenswrapper[4767]: E0128 18:50:37.351690 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="542e241a-df61-4376-8a81-8b82452979d0" containerName="init"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.351799 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="542e241a-df61-4376-8a81-8b82452979d0" containerName="init"
Jan 28 18:50:37 crc kubenswrapper[4767]: E0128 18:50:37.351880 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09" containerName="init"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.351955 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09" containerName="init"
Jan 28 18:50:37 crc kubenswrapper[4767]: E0128 18:50:37.352166 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b113dd1e-6f00-4827-8945-48317056e181" containerName="keystone-bootstrap"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.352255 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b113dd1e-6f00-4827-8945-48317056e181" containerName="keystone-bootstrap"
Jan 28 18:50:37 crc kubenswrapper[4767]: E0128 18:50:37.352319 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" containerName="dnsmasq-dns"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.352379 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" containerName="dnsmasq-dns"
Jan 28 18:50:37 crc kubenswrapper[4767]: E0128 18:50:37.352435 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="542e241a-df61-4376-8a81-8b82452979d0" containerName="dnsmasq-dns"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.352592 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="542e241a-df61-4376-8a81-8b82452979d0" containerName="dnsmasq-dns"
Jan 28 18:50:37 crc kubenswrapper[4767]: E0128 18:50:37.352674 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b867495c-e01f-46a2-aa93-e42cd53d1b64" containerName="glance-db-sync"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.352729 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b867495c-e01f-46a2-aa93-e42cd53d1b64" containerName="glance-db-sync"
Jan 28 18:50:37 crc kubenswrapper[4767]: E0128 18:50:37.352791 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" containerName="init"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.352839 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" containerName="init"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.353133 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="bf6b1ad0-c7ad-4fbb-97ea-14f15a867d09" containerName="init"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.353254 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b867495c-e01f-46a2-aa93-e42cd53d1b64" containerName="glance-db-sync"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.353341 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="542e241a-df61-4376-8a81-8b82452979d0" containerName="dnsmasq-dns"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.353412 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="80a845b8-6ce5-4ef0-9a8b-0c8f6cdc365a" containerName="dnsmasq-dns"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.353511 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b113dd1e-6f00-4827-8945-48317056e181" containerName="keystone-bootstrap"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.354380 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.360891 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.361066 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.361366 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-lb5dl"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.361578 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.361795 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.363616 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-2v2sq"]
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.455361 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-scripts\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.455524 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxdrx\" (UniqueName: \"kubernetes.io/projected/a803158d-90c2-492f-a92b-709d0e1f214e-kube-api-access-jxdrx\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.455570 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-fernet-keys\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.455608 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-config-data\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.455679 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-combined-ca-bundle\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.455803 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-credential-keys\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.559161 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-scripts\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.559277 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxdrx\" (UniqueName: \"kubernetes.io/projected/a803158d-90c2-492f-a92b-709d0e1f214e-kube-api-access-jxdrx\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.559313 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-fernet-keys\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.559345 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-config-data\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.559402 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-combined-ca-bundle\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.559428 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-credential-keys\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.567375 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-combined-ca-bundle\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.567558 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-credential-keys\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.579196 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-scripts\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.583723 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-config-data\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.584892 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-fernet-keys\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.612127 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxdrx\" (UniqueName: \"kubernetes.io/projected/a803158d-90c2-492f-a92b-709d0e1f214e-kube-api-access-jxdrx\") pod \"keystone-bootstrap-2v2sq\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: E0128 18:50:37.634189 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified"
Jan 28 18:50:37 crc kubenswrapper[4767]: E0128 18:50:37.634394 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7zn6n,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-8bggl_openstack(b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Jan 28 18:50:37 crc kubenswrapper[4767]: E0128 18:50:37.641639 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-8bggl" podUID="b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.683006 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-2v2sq"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.717306 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-4b5np"]
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.719343 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.750296 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-4b5np"]
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.868478 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.868575 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.868636 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.868672 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.868705 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-config\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.868750 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwcd8\" (UniqueName: \"kubernetes.io/projected/3aee08af-b80f-4f86-a11c-29227f00fb5b-kube-api-access-bwcd8\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.970403 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np"
Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.970976 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for
volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.971007 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.971037 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-config\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.971080 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwcd8\" (UniqueName: \"kubernetes.io/projected/3aee08af-b80f-4f86-a11c-29227f00fb5b-kube-api-access-bwcd8\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.971139 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.971804 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-dns-swift-storage-0\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.972118 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-ovsdbserver-nb\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.973150 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-ovsdbserver-sb\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.973657 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-dns-svc\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:37 crc kubenswrapper[4767]: I0128 18:50:37.974316 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-config\") pod 
\"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.054588 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwcd8\" (UniqueName: \"kubernetes.io/projected/3aee08af-b80f-4f86-a11c-29227f00fb5b-kube-api-access-bwcd8\") pod \"dnsmasq-dns-56df8fb6b7-4b5np\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") " pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:38 crc kubenswrapper[4767]: E0128 18:50:38.253748 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified" Jan 28 18:50:38 crc kubenswrapper[4767]: E0128 18:50:38.253954 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-db-sync,Image:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,Command:[/bin/bash],Args:[-c /usr/bin/heat-manage --config-dir /etc/heat/heat.conf.d db_sync],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/heat/heat.conf.d/00-default.conf,SubPath:00-default.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/heat/heat.conf.d/01-custom.conf,SubPath:01-custom.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xq4mq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42418,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42418,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-db-sync-5ptf6_openstack(b956870a-eae0-48fb-8e4f-182a9f276308): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 28 18:50:38 crc kubenswrapper[4767]: E0128 18:50:38.255469 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-db-sync-5ptf6" podUID="b956870a-eae0-48fb-8e4f-182a9f276308" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.268626 4767 scope.go:117] 
"RemoveContainer" containerID="bdd293df75c1a9231891f2ec61ba825228b2169f3e828744b0e0f6d529ebca2d" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.314245 4767 scope.go:117] "RemoveContainer" containerID="c6f9d5726aebc7dd73f0378e37e94829a8748b377b508453c1cf2243439643c7" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.342223 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:38 crc kubenswrapper[4767]: E0128 18:50:38.474059 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-8bggl" podUID="b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5" Jan 28 18:50:38 crc kubenswrapper[4767]: E0128 18:50:38.474721 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified\\\"\"" pod="openstack/heat-db-sync-5ptf6" podUID="b956870a-eae0-48fb-8e4f-182a9f276308" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.603630 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-b8fbc5445-tbfrc" podUID="542e241a-df61-4376-8a81-8b82452979d0" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.116:5353: i/o timeout" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.613796 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.618109 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.622943 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.624024 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-mvkq4" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.628844 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.693762 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.788198 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a7336cf1-ee15-4347-a96e-2049e488bdb5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.788429 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7336cf1-ee15-4347-a96e-2049e488bdb5-logs\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.788461 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-scripts\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.788485 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wxltn\" (UniqueName: \"kubernetes.io/projected/a7336cf1-ee15-4347-a96e-2049e488bdb5-kube-api-access-wxltn\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.788585 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.788654 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-config-data\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.788675 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " 
pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.830722 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b113dd1e-6f00-4827-8945-48317056e181" path="/var/lib/kubelet/pods/b113dd1e-6f00-4827-8945-48317056e181/volumes" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.890448 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.892470 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.894840 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-config-data\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.894895 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.894928 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a7336cf1-ee15-4347-a96e-2049e488bdb5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.894959 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7336cf1-ee15-4347-a96e-2049e488bdb5-logs\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.895008 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-scripts\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.895035 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wxltn\" (UniqueName: \"kubernetes.io/projected/a7336cf1-ee15-4347-a96e-2049e488bdb5-kube-api-access-wxltn\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.895189 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.896803 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 
18:50:38.897907 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a7336cf1-ee15-4347-a96e-2049e488bdb5-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.898151 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.900597 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.902566 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7336cf1-ee15-4347-a96e-2049e488bdb5-logs\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.905425 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-scripts\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.906448 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.914529 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-config-data\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.921803 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wxltn\" (UniqueName: \"kubernetes.io/projected/a7336cf1-ee15-4347-a96e-2049e488bdb5-kube-api-access-wxltn\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.942392 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.996703 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/219e126d-9e21-44b1-a71d-d3416f14d1c9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 
18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.996857 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.997025 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6zlh\" (UniqueName: \"kubernetes.io/projected/219e126d-9e21-44b1-a71d-d3416f14d1c9-kube-api-access-g6zlh\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.997048 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.997251 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.997327 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.997360 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/219e126d-9e21-44b1-a71d-d3416f14d1c9-logs\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:38 crc kubenswrapper[4767]: I0128 18:50:38.997414 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.077120 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-4b5np"] Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.108128 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.108313 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6zlh\" (UniqueName: \"kubernetes.io/projected/219e126d-9e21-44b1-a71d-d3416f14d1c9-kube-api-access-g6zlh\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc 
kubenswrapper[4767]: I0128 18:50:39.108500 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.108582 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.108626 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/219e126d-9e21-44b1-a71d-d3416f14d1c9-logs\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.108658 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.108759 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.108787 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/219e126d-9e21-44b1-a71d-d3416f14d1c9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.109738 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/219e126d-9e21-44b1-a71d-d3416f14d1c9-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.110029 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/219e126d-9e21-44b1-a71d-d3416f14d1c9-logs\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.116219 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-config-data\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.117815 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" 
(UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-scripts\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.118708 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-2v2sq"] Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.124073 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.145808 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6zlh\" (UniqueName: \"kubernetes.io/projected/219e126d-9e21-44b1-a71d-d3416f14d1c9-kube-api-access-g6zlh\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.163659 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.302506 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.510505 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"6c4deef50f94ebc84f432ab68abee6b83fa4675bb3fde9668560bfed495791e5"} Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.516486 4767 generic.go:334] "Generic (PLEG): container finished" podID="3aee08af-b80f-4f86-a11c-29227f00fb5b" containerID="85aa7fe70cc5d6b23bcd3f1a605e3043a91213fe327bf2679cd7f6db839009ea" exitCode=0 Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.516563 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" event={"ID":"3aee08af-b80f-4f86-a11c-29227f00fb5b","Type":"ContainerDied","Data":"85aa7fe70cc5d6b23bcd3f1a605e3043a91213fe327bf2679cd7f6db839009ea"} Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.516659 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" event={"ID":"3aee08af-b80f-4f86-a11c-29227f00fb5b","Type":"ContainerStarted","Data":"bbfd6416c03ad7ffde3850da1aaffe0b2b6603e32d610e74d4d910add38f452a"} Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.518441 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2v2sq" event={"ID":"a803158d-90c2-492f-a92b-709d0e1f214e","Type":"ContainerStarted","Data":"8717ec56a843de32925286ff588395d3fd7a138e783baf0d9c100f0227c136f2"} Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.518501 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2v2sq" event={"ID":"a803158d-90c2-492f-a92b-709d0e1f214e","Type":"ContainerStarted","Data":"3b3aacd452368d8b05d552290d7259ee569229192186508410b8ca743660685d"} Jan 
28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.603052 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-2v2sq" podStartSLOduration=2.60302968 podStartE2EDuration="2.60302968s" podCreationTimestamp="2026-01-28 18:50:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:50:39.579752054 +0000 UTC m=+1245.543934928" watchObservedRunningTime="2026-01-28 18:50:39.60302968 +0000 UTC m=+1245.567212554" Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.671728 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:50:39 crc kubenswrapper[4767]: I0128 18:50:39.974527 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:50:40 crc kubenswrapper[4767]: W0128 18:50:40.046380 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda7336cf1_ee15_4347_a96e_2049e488bdb5.slice/crio-b72abdf6cbad0b7c312b98f771134264afeb7a6ac654a5d857565aba2ca900f1 WatchSource:0}: Error finding container b72abdf6cbad0b7c312b98f771134264afeb7a6ac654a5d857565aba2ca900f1: Status 404 returned error can't find the container with id b72abdf6cbad0b7c312b98f771134264afeb7a6ac654a5d857565aba2ca900f1 Jan 28 18:50:40 crc kubenswrapper[4767]: W0128 18:50:40.063755 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod219e126d_9e21_44b1_a71d_d3416f14d1c9.slice/crio-73490008d07ab66ba9dd36d2a385870429b93148888f3404a4aedc1d4ade74de WatchSource:0}: Error finding container 73490008d07ab66ba9dd36d2a385870429b93148888f3404a4aedc1d4ade74de: Status 404 returned error can't find the container with id 73490008d07ab66ba9dd36d2a385870429b93148888f3404a4aedc1d4ade74de Jan 28 18:50:40 crc kubenswrapper[4767]: I0128 18:50:40.575945 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5d5a120-7ef9-431a-88a4-915986881b2d","Type":"ContainerStarted","Data":"4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa"} Jan 28 18:50:40 crc kubenswrapper[4767]: I0128 18:50:40.597523 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:40 crc kubenswrapper[4767]: I0128 18:50:40.604569 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a7336cf1-ee15-4347-a96e-2049e488bdb5","Type":"ContainerStarted","Data":"b72abdf6cbad0b7c312b98f771134264afeb7a6ac654a5d857565aba2ca900f1"} Jan 28 18:50:40 crc kubenswrapper[4767]: I0128 18:50:40.609296 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"219e126d-9e21-44b1-a71d-d3416f14d1c9","Type":"ContainerStarted","Data":"73490008d07ab66ba9dd36d2a385870429b93148888f3404a4aedc1d4ade74de"} Jan 28 18:50:40 crc kubenswrapper[4767]: I0128 18:50:40.627936 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" podStartSLOduration=3.627911177 podStartE2EDuration="3.627911177s" podCreationTimestamp="2026-01-28 18:50:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:50:40.623973434 +0000 UTC 
m=+1246.588156318" watchObservedRunningTime="2026-01-28 18:50:40.627911177 +0000 UTC m=+1246.592094051" Jan 28 18:50:41 crc kubenswrapper[4767]: I0128 18:50:41.627250 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-vm9r4" event={"ID":"28fde299-6f90-4c23-8a4f-15823bd8f4c5","Type":"ContainerStarted","Data":"305774a65ec9e3bddc549073e9797ae194a1589e90e26f371c30f463727655cc"} Jan 28 18:50:41 crc kubenswrapper[4767]: I0128 18:50:41.646285 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" event={"ID":"3aee08af-b80f-4f86-a11c-29227f00fb5b","Type":"ContainerStarted","Data":"df7372e3e19ffc60702cf139f694a349493503445b8ec8777123cbd66f4acbb2"} Jan 28 18:50:41 crc kubenswrapper[4767]: I0128 18:50:41.691890 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a7336cf1-ee15-4347-a96e-2049e488bdb5","Type":"ContainerStarted","Data":"54f88fca5ce71168a02615519cfc2fc4f5054c1ba6a4a367ff752a5cbe0ec5b6"} Jan 28 18:50:41 crc kubenswrapper[4767]: I0128 18:50:41.703953 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-vm9r4" podStartSLOduration=4.575644239 podStartE2EDuration="50.70392757s" podCreationTimestamp="2026-01-28 18:49:51 +0000 UTC" firstStartedPulling="2026-01-28 18:49:53.297847236 +0000 UTC m=+1199.262030110" lastFinishedPulling="2026-01-28 18:50:39.426130567 +0000 UTC m=+1245.390313441" observedRunningTime="2026-01-28 18:50:41.692828184 +0000 UTC m=+1247.657011068" watchObservedRunningTime="2026-01-28 18:50:41.70392757 +0000 UTC m=+1247.668110444" Jan 28 18:50:41 crc kubenswrapper[4767]: I0128 18:50:41.761859 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"219e126d-9e21-44b1-a71d-d3416f14d1c9","Type":"ContainerStarted","Data":"5a632a9524b6340a3a846b2914f5f0bbbc8bb9f50af11dc37a2e64bb04f67ea3"} Jan 28 18:50:41 crc kubenswrapper[4767]: I0128 18:50:41.802534 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:50:42 crc kubenswrapper[4767]: I0128 18:50:42.175112 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:50:43 crc kubenswrapper[4767]: I0128 18:50:43.787764 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a7336cf1-ee15-4347-a96e-2049e488bdb5","Type":"ContainerStarted","Data":"d7eee62be1dd40344a899832ed6709f04266b8604dc173a45cf3a05a414661bf"} Jan 28 18:50:43 crc kubenswrapper[4767]: I0128 18:50:43.788189 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a7336cf1-ee15-4347-a96e-2049e488bdb5" containerName="glance-log" containerID="cri-o://54f88fca5ce71168a02615519cfc2fc4f5054c1ba6a4a367ff752a5cbe0ec5b6" gracePeriod=30 Jan 28 18:50:43 crc kubenswrapper[4767]: I0128 18:50:43.788564 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="a7336cf1-ee15-4347-a96e-2049e488bdb5" containerName="glance-httpd" containerID="cri-o://d7eee62be1dd40344a899832ed6709f04266b8604dc173a45cf3a05a414661bf" gracePeriod=30 Jan 28 18:50:43 crc kubenswrapper[4767]: I0128 18:50:43.793768 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"219e126d-9e21-44b1-a71d-d3416f14d1c9","Type":"ContainerStarted","Data":"88fce6a9ea51d3a68a4b21b7ef30effd63a83206514419d0bd1a989a6ac6f498"} Jan 28 18:50:43 crc kubenswrapper[4767]: I0128 18:50:43.794016 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="219e126d-9e21-44b1-a71d-d3416f14d1c9" containerName="glance-log" containerID="cri-o://5a632a9524b6340a3a846b2914f5f0bbbc8bb9f50af11dc37a2e64bb04f67ea3" gracePeriod=30 Jan 28 18:50:43 crc kubenswrapper[4767]: I0128 18:50:43.794193 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="219e126d-9e21-44b1-a71d-d3416f14d1c9" containerName="glance-httpd" containerID="cri-o://88fce6a9ea51d3a68a4b21b7ef30effd63a83206514419d0bd1a989a6ac6f498" gracePeriod=30 Jan 28 18:50:43 crc kubenswrapper[4767]: I0128 18:50:43.817636 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.817607259 podStartE2EDuration="6.817607259s" podCreationTimestamp="2026-01-28 18:50:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:50:43.809293579 +0000 UTC m=+1249.773476473" watchObservedRunningTime="2026-01-28 18:50:43.817607259 +0000 UTC m=+1249.781790133" Jan 28 18:50:43 crc kubenswrapper[4767]: I0128 18:50:43.844506 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.844483518 podStartE2EDuration="6.844483518s" podCreationTimestamp="2026-01-28 18:50:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:50:43.839312757 +0000 UTC m=+1249.803495641" watchObservedRunningTime="2026-01-28 18:50:43.844483518 +0000 UTC m=+1249.808666392" Jan 28 18:50:44 crc kubenswrapper[4767]: I0128 18:50:44.817692 4767 generic.go:334] "Generic (PLEG): container finished" podID="a7336cf1-ee15-4347-a96e-2049e488bdb5" containerID="d7eee62be1dd40344a899832ed6709f04266b8604dc173a45cf3a05a414661bf" exitCode=0 Jan 28 18:50:44 crc kubenswrapper[4767]: I0128 18:50:44.817734 4767 generic.go:334] "Generic (PLEG): container finished" podID="a7336cf1-ee15-4347-a96e-2049e488bdb5" containerID="54f88fca5ce71168a02615519cfc2fc4f5054c1ba6a4a367ff752a5cbe0ec5b6" exitCode=143 Jan 28 18:50:44 crc kubenswrapper[4767]: I0128 18:50:44.819853 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a7336cf1-ee15-4347-a96e-2049e488bdb5","Type":"ContainerDied","Data":"d7eee62be1dd40344a899832ed6709f04266b8604dc173a45cf3a05a414661bf"} Jan 28 18:50:44 crc kubenswrapper[4767]: I0128 18:50:44.819891 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a7336cf1-ee15-4347-a96e-2049e488bdb5","Type":"ContainerDied","Data":"54f88fca5ce71168a02615519cfc2fc4f5054c1ba6a4a367ff752a5cbe0ec5b6"} Jan 28 18:50:44 crc kubenswrapper[4767]: I0128 18:50:44.822586 4767 generic.go:334] "Generic (PLEG): container finished" podID="219e126d-9e21-44b1-a71d-d3416f14d1c9" containerID="88fce6a9ea51d3a68a4b21b7ef30effd63a83206514419d0bd1a989a6ac6f498" exitCode=0 Jan 28 18:50:44 crc kubenswrapper[4767]: I0128 18:50:44.822892 4767 generic.go:334] "Generic (PLEG): container finished" 
podID="219e126d-9e21-44b1-a71d-d3416f14d1c9" containerID="5a632a9524b6340a3a846b2914f5f0bbbc8bb9f50af11dc37a2e64bb04f67ea3" exitCode=143 Jan 28 18:50:44 crc kubenswrapper[4767]: I0128 18:50:44.822929 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"219e126d-9e21-44b1-a71d-d3416f14d1c9","Type":"ContainerDied","Data":"88fce6a9ea51d3a68a4b21b7ef30effd63a83206514419d0bd1a989a6ac6f498"} Jan 28 18:50:44 crc kubenswrapper[4767]: I0128 18:50:44.822946 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"219e126d-9e21-44b1-a71d-d3416f14d1c9","Type":"ContainerDied","Data":"5a632a9524b6340a3a846b2914f5f0bbbc8bb9f50af11dc37a2e64bb04f67ea3"} Jan 28 18:50:44 crc kubenswrapper[4767]: I0128 18:50:44.827650 4767 generic.go:334] "Generic (PLEG): container finished" podID="a803158d-90c2-492f-a92b-709d0e1f214e" containerID="8717ec56a843de32925286ff588395d3fd7a138e783baf0d9c100f0227c136f2" exitCode=0 Jan 28 18:50:44 crc kubenswrapper[4767]: I0128 18:50:44.827679 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2v2sq" event={"ID":"a803158d-90c2-492f-a92b-709d0e1f214e","Type":"ContainerDied","Data":"8717ec56a843de32925286ff588395d3fd7a138e783baf0d9c100f0227c136f2"} Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.681122 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.688388 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.692872 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-2v2sq" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.796877 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-config-data\") pod \"219e126d-9e21-44b1-a71d-d3416f14d1c9\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.796946 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-scripts\") pod \"a7336cf1-ee15-4347-a96e-2049e488bdb5\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.796973 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxltn\" (UniqueName: \"kubernetes.io/projected/a7336cf1-ee15-4347-a96e-2049e488bdb5-kube-api-access-wxltn\") pod \"a7336cf1-ee15-4347-a96e-2049e488bdb5\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.797029 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-config-data\") pod \"a7336cf1-ee15-4347-a96e-2049e488bdb5\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.797058 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-scripts\") pod \"a803158d-90c2-492f-a92b-709d0e1f214e\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.797094 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-config-data\") pod \"a803158d-90c2-492f-a92b-709d0e1f214e\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.797166 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/219e126d-9e21-44b1-a71d-d3416f14d1c9-httpd-run\") pod \"219e126d-9e21-44b1-a71d-d3416f14d1c9\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.797329 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-fernet-keys\") pod \"a803158d-90c2-492f-a92b-709d0e1f214e\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.797377 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-credential-keys\") pod \"a803158d-90c2-492f-a92b-709d0e1f214e\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.797406 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"219e126d-9e21-44b1-a71d-d3416f14d1c9\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 
18:50:46.797457 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-combined-ca-bundle\") pod \"219e126d-9e21-44b1-a71d-d3416f14d1c9\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.797526 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g6zlh\" (UniqueName: \"kubernetes.io/projected/219e126d-9e21-44b1-a71d-d3416f14d1c9-kube-api-access-g6zlh\") pod \"219e126d-9e21-44b1-a71d-d3416f14d1c9\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.797556 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-scripts\") pod \"219e126d-9e21-44b1-a71d-d3416f14d1c9\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.799966 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a7336cf1-ee15-4347-a96e-2049e488bdb5-httpd-run\") pod \"a7336cf1-ee15-4347-a96e-2049e488bdb5\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.800127 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-combined-ca-bundle\") pod \"a7336cf1-ee15-4347-a96e-2049e488bdb5\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.800265 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"a7336cf1-ee15-4347-a96e-2049e488bdb5\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.800408 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-combined-ca-bundle\") pod \"a803158d-90c2-492f-a92b-709d0e1f214e\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.800518 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7336cf1-ee15-4347-a96e-2049e488bdb5-logs\") pod \"a7336cf1-ee15-4347-a96e-2049e488bdb5\" (UID: \"a7336cf1-ee15-4347-a96e-2049e488bdb5\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.800691 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxdrx\" (UniqueName: \"kubernetes.io/projected/a803158d-90c2-492f-a92b-709d0e1f214e-kube-api-access-jxdrx\") pod \"a803158d-90c2-492f-a92b-709d0e1f214e\" (UID: \"a803158d-90c2-492f-a92b-709d0e1f214e\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.800839 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/219e126d-9e21-44b1-a71d-d3416f14d1c9-logs\") pod \"219e126d-9e21-44b1-a71d-d3416f14d1c9\" (UID: \"219e126d-9e21-44b1-a71d-d3416f14d1c9\") " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.799823 4767 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/empty-dir/219e126d-9e21-44b1-a71d-d3416f14d1c9-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "219e126d-9e21-44b1-a71d-d3416f14d1c9" (UID: "219e126d-9e21-44b1-a71d-d3416f14d1c9"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.804763 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7336cf1-ee15-4347-a96e-2049e488bdb5-logs" (OuterVolumeSpecName: "logs") pod "a7336cf1-ee15-4347-a96e-2049e488bdb5" (UID: "a7336cf1-ee15-4347-a96e-2049e488bdb5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.808073 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7336cf1-ee15-4347-a96e-2049e488bdb5-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "a7336cf1-ee15-4347-a96e-2049e488bdb5" (UID: "a7336cf1-ee15-4347-a96e-2049e488bdb5"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.814442 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/219e126d-9e21-44b1-a71d-d3416f14d1c9-logs" (OuterVolumeSpecName: "logs") pod "219e126d-9e21-44b1-a71d-d3416f14d1c9" (UID: "219e126d-9e21-44b1-a71d-d3416f14d1c9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.822263 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-scripts" (OuterVolumeSpecName: "scripts") pod "219e126d-9e21-44b1-a71d-d3416f14d1c9" (UID: "219e126d-9e21-44b1-a71d-d3416f14d1c9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.823668 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/219e126d-9e21-44b1-a71d-d3416f14d1c9-kube-api-access-g6zlh" (OuterVolumeSpecName: "kube-api-access-g6zlh") pod "219e126d-9e21-44b1-a71d-d3416f14d1c9" (UID: "219e126d-9e21-44b1-a71d-d3416f14d1c9"). InnerVolumeSpecName "kube-api-access-g6zlh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.850648 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a803158d-90c2-492f-a92b-709d0e1f214e-kube-api-access-jxdrx" (OuterVolumeSpecName: "kube-api-access-jxdrx") pod "a803158d-90c2-492f-a92b-709d0e1f214e" (UID: "a803158d-90c2-492f-a92b-709d0e1f214e"). InnerVolumeSpecName "kube-api-access-jxdrx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.861793 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "a7336cf1-ee15-4347-a96e-2049e488bdb5" (UID: "a7336cf1-ee15-4347-a96e-2049e488bdb5"). InnerVolumeSpecName "local-storage06-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.861792 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "a803158d-90c2-492f-a92b-709d0e1f214e" (UID: "a803158d-90c2-492f-a92b-709d0e1f214e"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.861811 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "219e126d-9e21-44b1-a71d-d3416f14d1c9" (UID: "219e126d-9e21-44b1-a71d-d3416f14d1c9"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.861856 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7336cf1-ee15-4347-a96e-2049e488bdb5-kube-api-access-wxltn" (OuterVolumeSpecName: "kube-api-access-wxltn") pod "a7336cf1-ee15-4347-a96e-2049e488bdb5" (UID: "a7336cf1-ee15-4347-a96e-2049e488bdb5"). InnerVolumeSpecName "kube-api-access-wxltn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.861889 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "a803158d-90c2-492f-a92b-709d0e1f214e" (UID: "a803158d-90c2-492f-a92b-709d0e1f214e"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.862329 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-scripts" (OuterVolumeSpecName: "scripts") pod "a803158d-90c2-492f-a92b-709d0e1f214e" (UID: "a803158d-90c2-492f-a92b-709d0e1f214e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.865946 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "219e126d-9e21-44b1-a71d-d3416f14d1c9" (UID: "219e126d-9e21-44b1-a71d-d3416f14d1c9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.868261 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a803158d-90c2-492f-a92b-709d0e1f214e" (UID: "a803158d-90c2-492f-a92b-709d0e1f214e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.872745 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.883039 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7336cf1-ee15-4347-a96e-2049e488bdb5" (UID: "a7336cf1-ee15-4347-a96e-2049e488bdb5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.883763 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-scripts" (OuterVolumeSpecName: "scripts") pod "a7336cf1-ee15-4347-a96e-2049e488bdb5" (UID: "a7336cf1-ee15-4347-a96e-2049e488bdb5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.886362 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-2v2sq" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.889715 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911434 4767 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a7336cf1-ee15-4347-a96e-2049e488bdb5-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911493 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911523 4767 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911535 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911545 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7336cf1-ee15-4347-a96e-2049e488bdb5-logs\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911557 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxdrx\" (UniqueName: \"kubernetes.io/projected/a803158d-90c2-492f-a92b-709d0e1f214e-kube-api-access-jxdrx\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911569 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/219e126d-9e21-44b1-a71d-d3416f14d1c9-logs\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911578 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911589 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxltn\" 
(UniqueName: \"kubernetes.io/projected/a7336cf1-ee15-4347-a96e-2049e488bdb5-kube-api-access-wxltn\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911597 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911606 4767 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/219e126d-9e21-44b1-a71d-d3416f14d1c9-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911617 4767 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911628 4767 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911649 4767 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911662 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911675 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g6zlh\" (UniqueName: \"kubernetes.io/projected/219e126d-9e21-44b1-a71d-d3416f14d1c9-kube-api-access-g6zlh\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.911686 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.912908 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-config-data" (OuterVolumeSpecName: "config-data") pod "a803158d-90c2-492f-a92b-709d0e1f214e" (UID: "a803158d-90c2-492f-a92b-709d0e1f214e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.958824 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-config-data" (OuterVolumeSpecName: "config-data") pod "219e126d-9e21-44b1-a71d-d3416f14d1c9" (UID: "219e126d-9e21-44b1-a71d-d3416f14d1c9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.965474 4767 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 28 18:50:46 crc kubenswrapper[4767]: I0128 18:50:46.971676 4767 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.009822 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-config-data" (OuterVolumeSpecName: "config-data") pod "a7336cf1-ee15-4347-a96e-2049e488bdb5" (UID: "a7336cf1-ee15-4347-a96e-2049e488bdb5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.020591 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/219e126d-9e21-44b1-a71d-d3416f14d1c9-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.021519 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7336cf1-ee15-4347-a96e-2049e488bdb5-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.021814 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"219e126d-9e21-44b1-a71d-d3416f14d1c9","Type":"ContainerDied","Data":"73490008d07ab66ba9dd36d2a385870429b93148888f3404a4aedc1d4ade74de"} Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.022405 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-58b8dff7b-297q9"] Jan 28 18:50:47 crc kubenswrapper[4767]: E0128 18:50:47.022879 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7336cf1-ee15-4347-a96e-2049e488bdb5" containerName="glance-httpd" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.022893 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7336cf1-ee15-4347-a96e-2049e488bdb5" containerName="glance-httpd" Jan 28 18:50:47 crc kubenswrapper[4767]: E0128 18:50:47.022913 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a803158d-90c2-492f-a92b-709d0e1f214e" containerName="keystone-bootstrap" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.022920 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a803158d-90c2-492f-a92b-709d0e1f214e" containerName="keystone-bootstrap" Jan 28 18:50:47 crc kubenswrapper[4767]: E0128 18:50:47.023061 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7336cf1-ee15-4347-a96e-2049e488bdb5" containerName="glance-log" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.023074 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7336cf1-ee15-4347-a96e-2049e488bdb5" containerName="glance-log" Jan 28 18:50:47 crc kubenswrapper[4767]: E0128 18:50:47.023097 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="219e126d-9e21-44b1-a71d-d3416f14d1c9" containerName="glance-httpd" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.023104 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="219e126d-9e21-44b1-a71d-d3416f14d1c9" containerName="glance-httpd" Jan 28 18:50:47 crc kubenswrapper[4767]: E0128 
18:50:47.023116 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="219e126d-9e21-44b1-a71d-d3416f14d1c9" containerName="glance-log" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.023123 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="219e126d-9e21-44b1-a71d-d3416f14d1c9" containerName="glance-log" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.021906 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a803158d-90c2-492f-a92b-709d0e1f214e-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.023252 4767 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.023274 4767 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.023270 4767 scope.go:117] "RemoveContainer" containerID="88fce6a9ea51d3a68a4b21b7ef30effd63a83206514419d0bd1a989a6ac6f498" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.023334 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a803158d-90c2-492f-a92b-709d0e1f214e" containerName="keystone-bootstrap" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.023384 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7336cf1-ee15-4347-a96e-2049e488bdb5" containerName="glance-httpd" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.023399 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7336cf1-ee15-4347-a96e-2049e488bdb5" containerName="glance-log" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.023415 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="219e126d-9e21-44b1-a71d-d3416f14d1c9" containerName="glance-httpd" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.023424 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="219e126d-9e21-44b1-a71d-d3416f14d1c9" containerName="glance-log" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.024022 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-2v2sq" event={"ID":"a803158d-90c2-492f-a92b-709d0e1f214e","Type":"ContainerDied","Data":"3b3aacd452368d8b05d552290d7259ee569229192186508410b8ca743660685d"} Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.024165 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b3aacd452368d8b05d552290d7259ee569229192186508410b8ca743660685d" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.024193 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a7336cf1-ee15-4347-a96e-2049e488bdb5","Type":"ContainerDied","Data":"b72abdf6cbad0b7c312b98f771134264afeb7a6ac654a5d857565aba2ca900f1"} Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.024233 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-58b8dff7b-297q9"] Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.024246 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.026898 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.027294 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.125540 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-credential-keys\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.126154 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-scripts\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.126251 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpwx9\" (UniqueName: \"kubernetes.io/projected/0784a151-311d-42e2-b27d-3a1ce38e28d9-kube-api-access-wpwx9\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.126291 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-public-tls-certs\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.126393 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-internal-tls-certs\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.126623 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-fernet-keys\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.126924 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-config-data\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.126994 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-combined-ca-bundle\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " 
pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.204631 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.228916 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.232776 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-config-data\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.232815 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-combined-ca-bundle\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.232869 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-credential-keys\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.232897 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-scripts\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.232928 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpwx9\" (UniqueName: \"kubernetes.io/projected/0784a151-311d-42e2-b27d-3a1ce38e28d9-kube-api-access-wpwx9\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.232957 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-public-tls-certs\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.233022 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-internal-tls-certs\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.233052 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-fernet-keys\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.239829 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"scripts\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-scripts\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.242025 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-credential-keys\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.243532 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-fernet-keys\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.258450 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-combined-ca-bundle\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.263242 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-internal-tls-certs\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.274963 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-public-tls-certs\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.286253 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0784a151-311d-42e2-b27d-3a1ce38e28d9-config-data\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.289895 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.292011 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.295891 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-mvkq4" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.296302 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.296448 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.296612 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.304350 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpwx9\" (UniqueName: \"kubernetes.io/projected/0784a151-311d-42e2-b27d-3a1ce38e28d9-kube-api-access-wpwx9\") pod \"keystone-58b8dff7b-297q9\" (UID: \"0784a151-311d-42e2-b27d-3a1ce38e28d9\") " pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.308390 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.353891 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.369367 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.382283 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.384298 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.385228 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.388626 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.388907 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.393930 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.438141 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.438439 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.438486 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.438559 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46edc543-4bb1-408d-babc-b542091bafa8-logs\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.438590 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.438776 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2g947\" (UniqueName: \"kubernetes.io/projected/46edc543-4bb1-408d-babc-b542091bafa8-kube-api-access-2g947\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.438902 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.438974 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/46edc543-4bb1-408d-babc-b542091bafa8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.541842 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.541952 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.541981 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-config-data\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542011 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542054 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-scripts\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542072 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46edc543-4bb1-408d-babc-b542091bafa8-logs\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542088 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542108 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542128 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2g947\" (UniqueName: 
\"kubernetes.io/projected/46edc543-4bb1-408d-babc-b542091bafa8-kube-api-access-2g947\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542146 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j6p2v\" (UniqueName: \"kubernetes.io/projected/1dd0d0af-4059-48d5-9251-34c7f04df1c3-kube-api-access-j6p2v\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542181 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dd0d0af-4059-48d5-9251-34c7f04df1c3-logs\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542242 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542269 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1dd0d0af-4059-48d5-9251-34c7f04df1c3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542295 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/46edc543-4bb1-408d-babc-b542091bafa8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542362 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542371 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.545767 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46edc543-4bb1-408d-babc-b542091bafa8-logs\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.545818 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" 
(UniqueName: \"kubernetes.io/empty-dir/46edc543-4bb1-408d-babc-b542091bafa8-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.542391 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.546386 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.551734 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.552140 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-config-data\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.565033 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-scripts\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.566015 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2g947\" (UniqueName: \"kubernetes.io/projected/46edc543-4bb1-408d-babc-b542091bafa8-kube-api-access-2g947\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.581366 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.624408 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.648926 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-scripts\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.649009 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.649050 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j6p2v\" (UniqueName: \"kubernetes.io/projected/1dd0d0af-4059-48d5-9251-34c7f04df1c3-kube-api-access-j6p2v\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.649083 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dd0d0af-4059-48d5-9251-34c7f04df1c3-logs\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.649166 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1dd0d0af-4059-48d5-9251-34c7f04df1c3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.649391 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.649492 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.649544 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-config-data\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.651533 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dd0d0af-4059-48d5-9251-34c7f04df1c3-logs\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 
18:50:47.651765 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.653387 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-scripts\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.655973 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1dd0d0af-4059-48d5-9251-34c7f04df1c3-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.658264 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.658923 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-config-data\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.669707 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.677285 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j6p2v\" (UniqueName: \"kubernetes.io/projected/1dd0d0af-4059-48d5-9251-34c7f04df1c3-kube-api-access-j6p2v\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.692558 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " pod="openstack/glance-default-external-api-0" Jan 28 18:50:47 crc kubenswrapper[4767]: I0128 18:50:47.708958 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 18:50:48 crc kubenswrapper[4767]: I0128 18:50:48.344451 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" Jan 28 18:50:48 crc kubenswrapper[4767]: I0128 18:50:48.463261 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-hxs9q"] Jan 28 18:50:48 crc kubenswrapper[4767]: I0128 18:50:48.464610 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" podUID="e0c1967a-8d14-4e18-8049-640fccb74a19" containerName="dnsmasq-dns" containerID="cri-o://7ebb7015226cabcd6b9a74839917938386cc5f994638fbfec33a5c009f025b55" gracePeriod=10 Jan 28 18:50:48 crc kubenswrapper[4767]: I0128 18:50:48.813544 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="219e126d-9e21-44b1-a71d-d3416f14d1c9" path="/var/lib/kubelet/pods/219e126d-9e21-44b1-a71d-d3416f14d1c9/volumes" Jan 28 18:50:48 crc kubenswrapper[4767]: I0128 18:50:48.814752 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7336cf1-ee15-4347-a96e-2049e488bdb5" path="/var/lib/kubelet/pods/a7336cf1-ee15-4347-a96e-2049e488bdb5/volumes" Jan 28 18:50:48 crc kubenswrapper[4767]: I0128 18:50:48.926963 4767 generic.go:334] "Generic (PLEG): container finished" podID="e0c1967a-8d14-4e18-8049-640fccb74a19" containerID="7ebb7015226cabcd6b9a74839917938386cc5f994638fbfec33a5c009f025b55" exitCode=0 Jan 28 18:50:48 crc kubenswrapper[4767]: I0128 18:50:48.927039 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" event={"ID":"e0c1967a-8d14-4e18-8049-640fccb74a19","Type":"ContainerDied","Data":"7ebb7015226cabcd6b9a74839917938386cc5f994638fbfec33a5c009f025b55"} Jan 28 18:50:49 crc kubenswrapper[4767]: I0128 18:50:49.697986 4767 scope.go:117] "RemoveContainer" containerID="5a632a9524b6340a3a846b2914f5f0bbbc8bb9f50af11dc37a2e64bb04f67ea3" Jan 28 18:50:50 crc kubenswrapper[4767]: I0128 18:50:50.044150 4767 scope.go:117] "RemoveContainer" containerID="d7eee62be1dd40344a899832ed6709f04266b8604dc173a45cf3a05a414661bf" Jan 28 18:50:50 crc kubenswrapper[4767]: I0128 18:50:50.089015 4767 scope.go:117] "RemoveContainer" containerID="54f88fca5ce71168a02615519cfc2fc4f5054c1ba6a4a367ff752a5cbe0ec5b6" Jan 28 18:50:50 crc kubenswrapper[4767]: W0128 18:50:50.449631 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0784a151_311d_42e2_b27d_3a1ce38e28d9.slice/crio-b1f4c2cb12503b1a506dcc0873ae13142e5d6dd7b37b5593e18b4886259e2d1b WatchSource:0}: Error finding container b1f4c2cb12503b1a506dcc0873ae13142e5d6dd7b37b5593e18b4886259e2d1b: Status 404 returned error can't find the container with id b1f4c2cb12503b1a506dcc0873ae13142e5d6dd7b37b5593e18b4886259e2d1b Jan 28 18:50:50 crc kubenswrapper[4767]: I0128 18:50:50.460881 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-58b8dff7b-297q9"] Jan 28 18:50:50 crc kubenswrapper[4767]: I0128 18:50:50.647398 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:50:50 crc kubenswrapper[4767]: W0128 18:50:50.657276 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1dd0d0af_4059_48d5_9251_34c7f04df1c3.slice/crio-ea1dd9945a43d228bd7fa0ebdd88e8b14271a8935d10992430f3f8d7243dbb69 
WatchSource:0}: Error finding container ea1dd9945a43d228bd7fa0ebdd88e8b14271a8935d10992430f3f8d7243dbb69: Status 404 returned error can't find the container with id ea1dd9945a43d228bd7fa0ebdd88e8b14271a8935d10992430f3f8d7243dbb69 Jan 28 18:50:50 crc kubenswrapper[4767]: E0128 18:50:50.799142 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-5nj7s" podUID="008911ac-269d-47a4-a624-0e789415d794" Jan 28 18:50:50 crc kubenswrapper[4767]: I0128 18:50:50.979049 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1dd0d0af-4059-48d5-9251-34c7f04df1c3","Type":"ContainerStarted","Data":"ea1dd9945a43d228bd7fa0ebdd88e8b14271a8935d10992430f3f8d7243dbb69"} Jan 28 18:50:50 crc kubenswrapper[4767]: I0128 18:50:50.982762 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-58b8dff7b-297q9" event={"ID":"0784a151-311d-42e2-b27d-3a1ce38e28d9","Type":"ContainerStarted","Data":"b1f4c2cb12503b1a506dcc0873ae13142e5d6dd7b37b5593e18b4886259e2d1b"} Jan 28 18:50:51 crc kubenswrapper[4767]: I0128 18:50:51.334109 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:50:51 crc kubenswrapper[4767]: W0128 18:50:51.335717 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod46edc543_4bb1_408d_babc_b542091bafa8.slice/crio-fbcff4811c3f56de6183377d2b87e1b844845a4861e5169719ea4ba9bb1359fb WatchSource:0}: Error finding container fbcff4811c3f56de6183377d2b87e1b844845a4861e5169719ea4ba9bb1359fb: Status 404 returned error can't find the container with id fbcff4811c3f56de6183377d2b87e1b844845a4861e5169719ea4ba9bb1359fb Jan 28 18:50:51 crc kubenswrapper[4767]: I0128 18:50:51.996008 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"46edc543-4bb1-408d-babc-b542091bafa8","Type":"ContainerStarted","Data":"fbcff4811c3f56de6183377d2b87e1b844845a4861e5169719ea4ba9bb1359fb"} Jan 28 18:50:52 crc kubenswrapper[4767]: I0128 18:50:52.283904 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" podUID="e0c1967a-8d14-4e18-8049-640fccb74a19" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.143:5353: connect: connection refused" Jan 28 18:50:52 crc kubenswrapper[4767]: I0128 18:50:52.949299 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.013969 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" event={"ID":"e0c1967a-8d14-4e18-8049-640fccb74a19","Type":"ContainerDied","Data":"1e2c48c2cffaa4df152cdf697cfaf52cce82de11854e7c32939dd97ea4f0ed2d"} Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.014022 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf78879c9-hxs9q" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.014063 4767 scope.go:117] "RemoveContainer" containerID="7ebb7015226cabcd6b9a74839917938386cc5f994638fbfec33a5c009f025b55" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.048594 4767 scope.go:117] "RemoveContainer" containerID="49a970cadcc270e3e35cf9086f9ee13fa8b2b9aff884ff6121445410c5312cfd" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.067462 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-dns-swift-storage-0\") pod \"e0c1967a-8d14-4e18-8049-640fccb74a19\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.067515 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-dns-svc\") pod \"e0c1967a-8d14-4e18-8049-640fccb74a19\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.067605 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-ovsdbserver-sb\") pod \"e0c1967a-8d14-4e18-8049-640fccb74a19\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.067776 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-config\") pod \"e0c1967a-8d14-4e18-8049-640fccb74a19\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.067798 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zrff6\" (UniqueName: \"kubernetes.io/projected/e0c1967a-8d14-4e18-8049-640fccb74a19-kube-api-access-zrff6\") pod \"e0c1967a-8d14-4e18-8049-640fccb74a19\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.067915 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-ovsdbserver-nb\") pod \"e0c1967a-8d14-4e18-8049-640fccb74a19\" (UID: \"e0c1967a-8d14-4e18-8049-640fccb74a19\") " Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.081705 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e0c1967a-8d14-4e18-8049-640fccb74a19-kube-api-access-zrff6" (OuterVolumeSpecName: "kube-api-access-zrff6") pod "e0c1967a-8d14-4e18-8049-640fccb74a19" (UID: "e0c1967a-8d14-4e18-8049-640fccb74a19"). InnerVolumeSpecName "kube-api-access-zrff6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.122347 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e0c1967a-8d14-4e18-8049-640fccb74a19" (UID: "e0c1967a-8d14-4e18-8049-640fccb74a19"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.126063 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e0c1967a-8d14-4e18-8049-640fccb74a19" (UID: "e0c1967a-8d14-4e18-8049-640fccb74a19"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.128301 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e0c1967a-8d14-4e18-8049-640fccb74a19" (UID: "e0c1967a-8d14-4e18-8049-640fccb74a19"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.138751 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-config" (OuterVolumeSpecName: "config") pod "e0c1967a-8d14-4e18-8049-640fccb74a19" (UID: "e0c1967a-8d14-4e18-8049-640fccb74a19"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.139138 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e0c1967a-8d14-4e18-8049-640fccb74a19" (UID: "e0c1967a-8d14-4e18-8049-640fccb74a19"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.171117 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.171192 4767 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.171236 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.171251 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.171263 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e0c1967a-8d14-4e18-8049-640fccb74a19-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.171274 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zrff6\" (UniqueName: \"kubernetes.io/projected/e0c1967a-8d14-4e18-8049-640fccb74a19-kube-api-access-zrff6\") on node \"crc\" DevicePath \"\"" Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.349441 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openstack/dnsmasq-dns-cf78879c9-hxs9q"] Jan 28 18:50:53 crc kubenswrapper[4767]: I0128 18:50:53.358870 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf78879c9-hxs9q"] Jan 28 18:50:54 crc kubenswrapper[4767]: I0128 18:50:54.029462 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"46edc543-4bb1-408d-babc-b542091bafa8","Type":"ContainerStarted","Data":"7a4e13758cf6c450608437b107663b30e9e6bf86e3f86eaf412130a7117966e2"} Jan 28 18:50:54 crc kubenswrapper[4767]: I0128 18:50:54.044485 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1dd0d0af-4059-48d5-9251-34c7f04df1c3","Type":"ContainerStarted","Data":"bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45"} Jan 28 18:50:54 crc kubenswrapper[4767]: I0128 18:50:54.051445 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-58b8dff7b-297q9" event={"ID":"0784a151-311d-42e2-b27d-3a1ce38e28d9","Type":"ContainerStarted","Data":"6ee1e4f90160f64e0bb5647d0f4cce2adfd8f530579d50dbb8b5c173bb5e409a"} Jan 28 18:50:54 crc kubenswrapper[4767]: I0128 18:50:54.051751 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:50:54 crc kubenswrapper[4767]: I0128 18:50:54.089851 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-58b8dff7b-297q9" podStartSLOduration=8.089824293 podStartE2EDuration="8.089824293s" podCreationTimestamp="2026-01-28 18:50:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:50:54.082083201 +0000 UTC m=+1260.046266075" watchObservedRunningTime="2026-01-28 18:50:54.089824293 +0000 UTC m=+1260.054007167" Jan 28 18:50:54 crc kubenswrapper[4767]: I0128 18:50:54.875476 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e0c1967a-8d14-4e18-8049-640fccb74a19" path="/var/lib/kubelet/pods/e0c1967a-8d14-4e18-8049-640fccb74a19/volumes" Jan 28 18:50:55 crc kubenswrapper[4767]: I0128 18:50:55.063894 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1dd0d0af-4059-48d5-9251-34c7f04df1c3","Type":"ContainerStarted","Data":"a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2"} Jan 28 18:50:55 crc kubenswrapper[4767]: I0128 18:50:55.068534 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-5ptf6" event={"ID":"b956870a-eae0-48fb-8e4f-182a9f276308","Type":"ContainerStarted","Data":"5694634af50740938137546ad08c9781975843d69d312ad15e40d7db6424d4a0"} Jan 28 18:50:55 crc kubenswrapper[4767]: I0128 18:50:55.070814 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"46edc543-4bb1-408d-babc-b542091bafa8","Type":"ContainerStarted","Data":"48d6b4934cfcf5fb0462346dd6a9cb82e588d8d558c9f955d23dc8d2654d1c16"} Jan 28 18:50:55 crc kubenswrapper[4767]: I0128 18:50:55.073427 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-8bggl" event={"ID":"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5","Type":"ContainerStarted","Data":"8fe6bb77e3b376735c42a645ecd669af4d9031c78aecf256f09b77a482fbe827"} Jan 28 18:50:55 crc kubenswrapper[4767]: I0128 18:50:55.076110 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"a5d5a120-7ef9-431a-88a4-915986881b2d","Type":"ContainerStarted","Data":"6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908"} Jan 28 18:50:55 crc kubenswrapper[4767]: I0128 18:50:55.091463 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=8.091425501 podStartE2EDuration="8.091425501s" podCreationTimestamp="2026-01-28 18:50:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:50:55.089015646 +0000 UTC m=+1261.053198530" watchObservedRunningTime="2026-01-28 18:50:55.091425501 +0000 UTC m=+1261.055608375" Jan 28 18:50:55 crc kubenswrapper[4767]: I0128 18:50:55.125334 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-db-sync-5ptf6" podStartSLOduration=2.713901648 podStartE2EDuration="1m4.125306389s" podCreationTimestamp="2026-01-28 18:49:51 +0000 UTC" firstStartedPulling="2026-01-28 18:49:52.687036111 +0000 UTC m=+1198.651218985" lastFinishedPulling="2026-01-28 18:50:54.098440842 +0000 UTC m=+1260.062623726" observedRunningTime="2026-01-28 18:50:55.12087311 +0000 UTC m=+1261.085055984" watchObservedRunningTime="2026-01-28 18:50:55.125306389 +0000 UTC m=+1261.089489263" Jan 28 18:50:55 crc kubenswrapper[4767]: I0128 18:50:55.138221 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-8bggl" podStartSLOduration=2.8200199230000003 podStartE2EDuration="1m4.138173211s" podCreationTimestamp="2026-01-28 18:49:51 +0000 UTC" firstStartedPulling="2026-01-28 18:49:53.117088971 +0000 UTC m=+1199.081271855" lastFinishedPulling="2026-01-28 18:50:54.435242269 +0000 UTC m=+1260.399425143" observedRunningTime="2026-01-28 18:50:55.13747215 +0000 UTC m=+1261.101655024" watchObservedRunningTime="2026-01-28 18:50:55.138173211 +0000 UTC m=+1261.102356085" Jan 28 18:50:55 crc kubenswrapper[4767]: I0128 18:50:55.165789 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=8.165753192 podStartE2EDuration="8.165753192s" podCreationTimestamp="2026-01-28 18:50:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:50:55.161175949 +0000 UTC m=+1261.125358823" watchObservedRunningTime="2026-01-28 18:50:55.165753192 +0000 UTC m=+1261.129936066" Jan 28 18:50:57 crc kubenswrapper[4767]: I0128 18:50:57.625462 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:57 crc kubenswrapper[4767]: I0128 18:50:57.626011 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:57 crc kubenswrapper[4767]: I0128 18:50:57.669113 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:57 crc kubenswrapper[4767]: I0128 18:50:57.675027 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:57 crc kubenswrapper[4767]: I0128 18:50:57.710568 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 28 18:50:57 crc kubenswrapper[4767]: I0128 18:50:57.710638 4767 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 28 18:50:57 crc kubenswrapper[4767]: I0128 18:50:57.753688 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 28 18:50:57 crc kubenswrapper[4767]: I0128 18:50:57.769559 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 28 18:50:58 crc kubenswrapper[4767]: I0128 18:50:58.118414 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 28 18:50:58 crc kubenswrapper[4767]: I0128 18:50:58.118469 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 28 18:50:58 crc kubenswrapper[4767]: I0128 18:50:58.118482 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 28 18:50:58 crc kubenswrapper[4767]: I0128 18:50:58.118491 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Jan 28 18:51:00 crc kubenswrapper[4767]: I0128 18:51:00.465307 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 28 18:51:00 crc kubenswrapper[4767]: I0128 18:51:00.465753 4767 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 28 18:51:00 crc kubenswrapper[4767]: I0128 18:51:00.468293 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 28 18:51:00 crc kubenswrapper[4767]: I0128 18:51:00.782934 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 28 18:51:01 crc kubenswrapper[4767]: I0128 18:51:01.655158 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Jan 28 18:51:04 crc kubenswrapper[4767]: E0128 18:51:04.069355 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" Jan 28 18:51:04 crc kubenswrapper[4767]: I0128 18:51:04.211464 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5d5a120-7ef9-431a-88a4-915986881b2d","Type":"ContainerStarted","Data":"e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427"} Jan 28 18:51:04 crc kubenswrapper[4767]: I0128 18:51:04.212114 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 18:51:04 crc kubenswrapper[4767]: I0128 18:51:04.211738 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerName="sg-core" containerID="cri-o://6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908" gracePeriod=30 Jan 28 18:51:04 crc kubenswrapper[4767]: I0128 18:51:04.211785 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerName="proxy-httpd" containerID="cri-o://e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427" gracePeriod=30 Jan 28 18:51:04 
crc kubenswrapper[4767]: I0128 18:51:04.211626 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerName="ceilometer-notification-agent" containerID="cri-o://4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa" gracePeriod=30 Jan 28 18:51:05 crc kubenswrapper[4767]: I0128 18:51:05.224576 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-5nj7s" event={"ID":"008911ac-269d-47a4-a624-0e789415d794","Type":"ContainerStarted","Data":"9cb422e929b3359f7658bb7c5266ba58cce4381d8d29730cc33c98395f2b24b0"} Jan 28 18:51:05 crc kubenswrapper[4767]: I0128 18:51:05.226901 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-vm9r4" event={"ID":"28fde299-6f90-4c23-8a4f-15823bd8f4c5","Type":"ContainerDied","Data":"305774a65ec9e3bddc549073e9797ae194a1589e90e26f371c30f463727655cc"} Jan 28 18:51:05 crc kubenswrapper[4767]: I0128 18:51:05.226449 4767 generic.go:334] "Generic (PLEG): container finished" podID="28fde299-6f90-4c23-8a4f-15823bd8f4c5" containerID="305774a65ec9e3bddc549073e9797ae194a1589e90e26f371c30f463727655cc" exitCode=0 Jan 28 18:51:05 crc kubenswrapper[4767]: I0128 18:51:05.230775 4767 generic.go:334] "Generic (PLEG): container finished" podID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerID="e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427" exitCode=0 Jan 28 18:51:05 crc kubenswrapper[4767]: I0128 18:51:05.230817 4767 generic.go:334] "Generic (PLEG): container finished" podID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerID="6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908" exitCode=2 Jan 28 18:51:05 crc kubenswrapper[4767]: I0128 18:51:05.231110 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5d5a120-7ef9-431a-88a4-915986881b2d","Type":"ContainerDied","Data":"e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427"} Jan 28 18:51:05 crc kubenswrapper[4767]: I0128 18:51:05.231328 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5d5a120-7ef9-431a-88a4-915986881b2d","Type":"ContainerDied","Data":"6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908"} Jan 28 18:51:05 crc kubenswrapper[4767]: I0128 18:51:05.250362 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-5nj7s" podStartSLOduration=2.781471129 podStartE2EDuration="1m14.250335806s" podCreationTimestamp="2026-01-28 18:49:51 +0000 UTC" firstStartedPulling="2026-01-28 18:49:52.949823647 +0000 UTC m=+1198.914006511" lastFinishedPulling="2026-01-28 18:51:04.418688314 +0000 UTC m=+1270.382871188" observedRunningTime="2026-01-28 18:51:05.246174037 +0000 UTC m=+1271.210356951" watchObservedRunningTime="2026-01-28 18:51:05.250335806 +0000 UTC m=+1271.214518680" Jan 28 18:51:05 crc kubenswrapper[4767]: I0128 18:51:05.934746 4767 util.go:48] "No ready sandbox for pod can be found. 
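
[annotation] The pod_startup_latency_tracker entries record two durations: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling) from it. A quick check against the placement-db-sync-5nj7s entry just above (values copied from the log; the residual in the ninth decimal is wall-clock versus monotonic-clock rounding):

    package main

    import "fmt"

    func main() {
        // placement-db-sync-5nj7s, from the tracker entry above:
        e2e := 74.250335806 // podStartE2EDuration "1m14.250335806s", in seconds
        // image-pull window: 18:51:04.418688314 - 18:49:52.949823647
        pull := 71.468864667
        fmt.Printf("SLO duration ≈ %.9f\n", e2e-pull) // logged: 2.781471129
    }
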
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.114073 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-sg-core-conf-yaml\") pod \"a5d5a120-7ef9-431a-88a4-915986881b2d\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.115022 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-scripts\") pod \"a5d5a120-7ef9-431a-88a4-915986881b2d\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.115232 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5d5a120-7ef9-431a-88a4-915986881b2d-run-httpd\") pod \"a5d5a120-7ef9-431a-88a4-915986881b2d\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.115353 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-combined-ca-bundle\") pod \"a5d5a120-7ef9-431a-88a4-915986881b2d\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.115651 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xnc2\" (UniqueName: \"kubernetes.io/projected/a5d5a120-7ef9-431a-88a4-915986881b2d-kube-api-access-9xnc2\") pod \"a5d5a120-7ef9-431a-88a4-915986881b2d\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.116971 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-config-data\") pod \"a5d5a120-7ef9-431a-88a4-915986881b2d\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.116733 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5d5a120-7ef9-431a-88a4-915986881b2d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "a5d5a120-7ef9-431a-88a4-915986881b2d" (UID: "a5d5a120-7ef9-431a-88a4-915986881b2d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.117141 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5d5a120-7ef9-431a-88a4-915986881b2d-log-httpd\") pod \"a5d5a120-7ef9-431a-88a4-915986881b2d\" (UID: \"a5d5a120-7ef9-431a-88a4-915986881b2d\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.117772 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a5d5a120-7ef9-431a-88a4-915986881b2d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "a5d5a120-7ef9-431a-88a4-915986881b2d" (UID: "a5d5a120-7ef9-431a-88a4-915986881b2d"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.118669 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5d5a120-7ef9-431a-88a4-915986881b2d-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.118709 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/a5d5a120-7ef9-431a-88a4-915986881b2d-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.124686 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a5d5a120-7ef9-431a-88a4-915986881b2d-kube-api-access-9xnc2" (OuterVolumeSpecName: "kube-api-access-9xnc2") pod "a5d5a120-7ef9-431a-88a4-915986881b2d" (UID: "a5d5a120-7ef9-431a-88a4-915986881b2d"). InnerVolumeSpecName "kube-api-access-9xnc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.125337 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-scripts" (OuterVolumeSpecName: "scripts") pod "a5d5a120-7ef9-431a-88a4-915986881b2d" (UID: "a5d5a120-7ef9-431a-88a4-915986881b2d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.156410 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "a5d5a120-7ef9-431a-88a4-915986881b2d" (UID: "a5d5a120-7ef9-431a-88a4-915986881b2d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.177402 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a5d5a120-7ef9-431a-88a4-915986881b2d" (UID: "a5d5a120-7ef9-431a-88a4-915986881b2d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.199153 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-config-data" (OuterVolumeSpecName: "config-data") pod "a5d5a120-7ef9-431a-88a4-915986881b2d" (UID: "a5d5a120-7ef9-431a-88a4-915986881b2d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.221576 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xnc2\" (UniqueName: \"kubernetes.io/projected/a5d5a120-7ef9-431a-88a4-915986881b2d-kube-api-access-9xnc2\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.221643 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.221653 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.221664 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.221675 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5d5a120-7ef9-431a-88a4-915986881b2d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.246480 4767 generic.go:334] "Generic (PLEG): container finished" podID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerID="4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa" exitCode=0 Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.246562 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5d5a120-7ef9-431a-88a4-915986881b2d","Type":"ContainerDied","Data":"4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa"} Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.246760 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"a5d5a120-7ef9-431a-88a4-915986881b2d","Type":"ContainerDied","Data":"4a71f18e24e036be3bd6de9874d976e7f2597700c3f9e3303a6750367e1a2ec4"} Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.246803 4767 scope.go:117] "RemoveContainer" containerID="e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.246628 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.381184 4767 scope.go:117] "RemoveContainer" containerID="6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.427958 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.447181 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.481657 4767 scope.go:117] "RemoveContainer" containerID="4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.509194 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:06 crc kubenswrapper[4767]: E0128 18:51:06.509885 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerName="proxy-httpd" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.509905 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerName="proxy-httpd" Jan 28 18:51:06 crc kubenswrapper[4767]: E0128 18:51:06.509928 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0c1967a-8d14-4e18-8049-640fccb74a19" containerName="init" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.509936 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0c1967a-8d14-4e18-8049-640fccb74a19" containerName="init" Jan 28 18:51:06 crc kubenswrapper[4767]: E0128 18:51:06.509957 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e0c1967a-8d14-4e18-8049-640fccb74a19" containerName="dnsmasq-dns" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.509965 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e0c1967a-8d14-4e18-8049-640fccb74a19" containerName="dnsmasq-dns" Jan 28 18:51:06 crc kubenswrapper[4767]: E0128 18:51:06.509982 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerName="sg-core" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.509989 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerName="sg-core" Jan 28 18:51:06 crc kubenswrapper[4767]: E0128 18:51:06.510010 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerName="ceilometer-notification-agent" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.510019 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerName="ceilometer-notification-agent" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.510260 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerName="sg-core" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.510286 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerName="proxy-httpd" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.510308 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e0c1967a-8d14-4e18-8049-640fccb74a19" containerName="dnsmasq-dns" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.510318 4767 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" containerName="ceilometer-notification-agent" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.517747 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.519780 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.530890 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.531768 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.638747 4767 scope.go:117] "RemoveContainer" containerID="e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.647802 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-scripts\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.647893 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.647926 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.647960 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-run-httpd\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.648009 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-config-data\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.648035 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddmmf\" (UniqueName: \"kubernetes.io/projected/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-kube-api-access-ddmmf\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.648074 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-log-httpd\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc 
kubenswrapper[4767]: E0128 18:51:06.650080 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427\": container with ID starting with e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427 not found: ID does not exist" containerID="e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.650160 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427"} err="failed to get container status \"e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427\": rpc error: code = NotFound desc = could not find container \"e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427\": container with ID starting with e08585ff016c95b9d09d0b82870bc7fd7e8ddabc8acedb23fcf564be3631b427 not found: ID does not exist" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.650325 4767 scope.go:117] "RemoveContainer" containerID="6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908" Jan 28 18:51:06 crc kubenswrapper[4767]: E0128 18:51:06.652715 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908\": container with ID starting with 6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908 not found: ID does not exist" containerID="6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.652795 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908"} err="failed to get container status \"6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908\": rpc error: code = NotFound desc = could not find container \"6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908\": container with ID starting with 6d65c2c24eefeb951b5a00633bbe7dc8180efb7acf08182bf8594f78848c5908 not found: ID does not exist" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.652906 4767 scope.go:117] "RemoveContainer" containerID="4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa" Jan 28 18:51:06 crc kubenswrapper[4767]: E0128 18:51:06.656533 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa\": container with ID starting with 4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa not found: ID does not exist" containerID="4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.656624 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa"} err="failed to get container status \"4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa\": rpc error: code = NotFound desc = could not find container \"4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa\": container with ID starting with 4d82cc3564447ca0a4a978b9d99f58d6ca5af7550df2e6c4fd28b4ffb35223fa not found: ID does not exist" Jan 28 18:51:06 crc kubenswrapper[4767]: 
I0128 18:51:06.696648 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749150 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/28fde299-6f90-4c23-8a4f-15823bd8f4c5-etc-machine-id\") pod \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749238 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-scripts\") pod \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749313 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-db-sync-config-data\") pod \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749338 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-65xzt\" (UniqueName: \"kubernetes.io/projected/28fde299-6f90-4c23-8a4f-15823bd8f4c5-kube-api-access-65xzt\") pod \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749400 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-config-data\") pod \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749475 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-combined-ca-bundle\") pod \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\" (UID: \"28fde299-6f90-4c23-8a4f-15823bd8f4c5\") " Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749604 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-config-data\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749626 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddmmf\" (UniqueName: \"kubernetes.io/projected/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-kube-api-access-ddmmf\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749661 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-log-httpd\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749725 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-scripts\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749763 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749802 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.749844 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-run-httpd\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.750568 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-run-httpd\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.765125 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/28fde299-6f90-4c23-8a4f-15823bd8f4c5-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "28fde299-6f90-4c23-8a4f-15823bd8f4c5" (UID: "28fde299-6f90-4c23-8a4f-15823bd8f4c5"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.784447 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "28fde299-6f90-4c23-8a4f-15823bd8f4c5" (UID: "28fde299-6f90-4c23-8a4f-15823bd8f4c5"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.785907 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.789492 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-scripts\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.794271 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-config-data\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.804894 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-log-httpd\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.807806 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28fde299-6f90-4c23-8a4f-15823bd8f4c5-kube-api-access-65xzt" (OuterVolumeSpecName: "kube-api-access-65xzt") pod "28fde299-6f90-4c23-8a4f-15823bd8f4c5" (UID: "28fde299-6f90-4c23-8a4f-15823bd8f4c5"). InnerVolumeSpecName "kube-api-access-65xzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.816998 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.821681 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-scripts" (OuterVolumeSpecName: "scripts") pod "28fde299-6f90-4c23-8a4f-15823bd8f4c5" (UID: "28fde299-6f90-4c23-8a4f-15823bd8f4c5"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.823023 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddmmf\" (UniqueName: \"kubernetes.io/projected/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-kube-api-access-ddmmf\") pod \"ceilometer-0\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.843431 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a5d5a120-7ef9-431a-88a4-915986881b2d" path="/var/lib/kubelet/pods/a5d5a120-7ef9-431a-88a4-915986881b2d/volumes" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.852671 4767 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.852735 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-65xzt\" (UniqueName: \"kubernetes.io/projected/28fde299-6f90-4c23-8a4f-15823bd8f4c5-kube-api-access-65xzt\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.852750 4767 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/28fde299-6f90-4c23-8a4f-15823bd8f4c5-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.852759 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.858418 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "28fde299-6f90-4c23-8a4f-15823bd8f4c5" (UID: "28fde299-6f90-4c23-8a4f-15823bd8f4c5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.864128 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-config-data" (OuterVolumeSpecName: "config-data") pod "28fde299-6f90-4c23-8a4f-15823bd8f4c5" (UID: "28fde299-6f90-4c23-8a4f-15823bd8f4c5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.887329 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.958496 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:06 crc kubenswrapper[4767]: I0128 18:51:06.959423 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28fde299-6f90-4c23-8a4f-15823bd8f4c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.018163 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.260401 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-vm9r4" event={"ID":"28fde299-6f90-4c23-8a4f-15823bd8f4c5","Type":"ContainerDied","Data":"92597bae4fad0bf39706b62f0e95ae03752a0a75c02e2be437a4c1b499dcf395"} Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.260438 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-vm9r4" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.260467 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="92597bae4fad0bf39706b62f0e95ae03752a0a75c02e2be437a4c1b499dcf395" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.394052 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:07 crc kubenswrapper[4767]: W0128 18:51:07.400309 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c2f0944_d09c_4e0a_8bf9_c63ff9598e1b.slice/crio-2802f42e98fadbb8aaf33cbd373213e299ec3d79ee26d430f41956713c89b5ff WatchSource:0}: Error finding container 2802f42e98fadbb8aaf33cbd373213e299ec3d79ee26d430f41956713c89b5ff: Status 404 returned error can't find the container with id 2802f42e98fadbb8aaf33cbd373213e299ec3d79ee26d430f41956713c89b5ff Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.632076 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 18:51:07 crc kubenswrapper[4767]: E0128 18:51:07.632672 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28fde299-6f90-4c23-8a4f-15823bd8f4c5" containerName="cinder-db-sync" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.632694 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="28fde299-6f90-4c23-8a4f-15823bd8f4c5" containerName="cinder-db-sync" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.632946 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="28fde299-6f90-4c23-8a4f-15823bd8f4c5" containerName="cinder-db-sync" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.634366 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.637915 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.641010 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-xhhd2" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.641359 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.641437 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.653742 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.749433 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d6bd97c5-rb9kc"] Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.752511 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.771033 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d6bd97c5-rb9kc"] Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.780070 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-scripts\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.780234 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-config-data\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.780289 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f3a17316-3eca-4517-8254-94224f26cf8f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.780309 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.780513 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.780600 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h2fsv\" (UniqueName: 
\"kubernetes.io/projected/f3a17316-3eca-4517-8254-94224f26cf8f-kube-api-access-h2fsv\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.882506 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-swift-storage-0\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.882604 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-config-data\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.882661 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f3a17316-3eca-4517-8254-94224f26cf8f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.882727 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-config\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.882760 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.882917 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-svc\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.882960 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-ovsdbserver-sb\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.882999 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.883039 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
\"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-ovsdbserver-nb\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.883074 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxpz9\" (UniqueName: \"kubernetes.io/projected/04104678-852d-4c2f-90af-90d8a3a58b6a-kube-api-access-vxpz9\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.883117 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h2fsv\" (UniqueName: \"kubernetes.io/projected/f3a17316-3eca-4517-8254-94224f26cf8f-kube-api-access-h2fsv\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.883193 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-scripts\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.883745 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f3a17316-3eca-4517-8254-94224f26cf8f-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.897712 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.899264 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.901226 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-config-data\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.904753 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-scripts\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.916814 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h2fsv\" (UniqueName: \"kubernetes.io/projected/f3a17316-3eca-4517-8254-94224f26cf8f-kube-api-access-h2fsv\") pod \"cinder-scheduler-0\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") " pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc 
kubenswrapper[4767]: I0128 18:51:07.924336 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.926816 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.930738 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.958245 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.981872 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.996924 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-config\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.997079 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-svc\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.997126 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-ovsdbserver-sb\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.997171 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-ovsdbserver-nb\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.997233 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxpz9\" (UniqueName: \"kubernetes.io/projected/04104678-852d-4c2f-90af-90d8a3a58b6a-kube-api-access-vxpz9\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.997311 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-swift-storage-0\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.998590 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-swift-storage-0\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.999411 4767 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-ovsdbserver-nb\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.999492 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-ovsdbserver-sb\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:07 crc kubenswrapper[4767]: I0128 18:51:07.999611 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-svc\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.000059 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-config\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.022156 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxpz9\" (UniqueName: \"kubernetes.io/projected/04104678-852d-4c2f-90af-90d8a3a58b6a-kube-api-access-vxpz9\") pod \"dnsmasq-dns-5d6bd97c5-rb9kc\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") " pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.088155 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.099498 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/612ac632-407a-435a-8459-1fa31da72808-etc-machine-id\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.099671 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-config-data\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.099754 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/612ac632-407a-435a-8459-1fa31da72808-logs\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.099830 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-config-data-custom\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.099888 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-scripts\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.099951 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.100025 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxjs4\" (UniqueName: \"kubernetes.io/projected/612ac632-407a-435a-8459-1fa31da72808-kube-api-access-zxjs4\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.204539 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-config-data\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.205159 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/612ac632-407a-435a-8459-1fa31da72808-logs\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.205313 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-config-data-custom\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.205400 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-scripts\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.205538 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.208798 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/612ac632-407a-435a-8459-1fa31da72808-logs\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.210381 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxjs4\" (UniqueName: \"kubernetes.io/projected/612ac632-407a-435a-8459-1fa31da72808-kube-api-access-zxjs4\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.210473 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/612ac632-407a-435a-8459-1fa31da72808-etc-machine-id\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.217286 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/612ac632-407a-435a-8459-1fa31da72808-etc-machine-id\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.225688 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.226386 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-scripts\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.226703 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-config-data-custom\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.239323 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxjs4\" (UniqueName: 
\"kubernetes.io/projected/612ac632-407a-435a-8459-1fa31da72808-kube-api-access-zxjs4\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.244399 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-config-data\") pod \"cinder-api-0\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") " pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.289103 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b","Type":"ContainerStarted","Data":"2802f42e98fadbb8aaf33cbd373213e299ec3d79ee26d430f41956713c89b5ff"} Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.402859 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.526185 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 18:51:08 crc kubenswrapper[4767]: I0128 18:51:08.780876 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d6bd97c5-rb9kc"] Jan 28 18:51:09 crc kubenswrapper[4767]: I0128 18:51:09.010766 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 28 18:51:09 crc kubenswrapper[4767]: I0128 18:51:09.337502 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b","Type":"ContainerStarted","Data":"684c67e751eddcda3281a8a2c54f6de29cb59043e3b20439004bc668caa20195"} Jan 28 18:51:09 crc kubenswrapper[4767]: I0128 18:51:09.341413 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"612ac632-407a-435a-8459-1fa31da72808","Type":"ContainerStarted","Data":"0e8416c35cf29baf699b7358a885f49fc06d7f58908196cf437696b93551d5fb"} Jan 28 18:51:09 crc kubenswrapper[4767]: I0128 18:51:09.352363 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f3a17316-3eca-4517-8254-94224f26cf8f","Type":"ContainerStarted","Data":"af05ebacd723dbf074ff88870af2fc051cea01fd76147056a4013000f1b69c31"} Jan 28 18:51:09 crc kubenswrapper[4767]: I0128 18:51:09.357638 4767 generic.go:334] "Generic (PLEG): container finished" podID="04104678-852d-4c2f-90af-90d8a3a58b6a" containerID="3780ed617ed6de3b751e83a24cc63869d372a4005e88f2cb4fc58e99c7e25017" exitCode=0 Jan 28 18:51:09 crc kubenswrapper[4767]: I0128 18:51:09.358470 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" event={"ID":"04104678-852d-4c2f-90af-90d8a3a58b6a","Type":"ContainerDied","Data":"3780ed617ed6de3b751e83a24cc63869d372a4005e88f2cb4fc58e99c7e25017"} Jan 28 18:51:09 crc kubenswrapper[4767]: I0128 18:51:09.358822 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" event={"ID":"04104678-852d-4c2f-90af-90d8a3a58b6a","Type":"ContainerStarted","Data":"53f1e11487a2e2410e0dae61c3679a4343097c14f04c0a774b8a631bc76d4793"} Jan 28 18:51:09 crc kubenswrapper[4767]: I0128 18:51:09.371761 4767 generic.go:334] "Generic (PLEG): container finished" podID="b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5" containerID="8fe6bb77e3b376735c42a645ecd669af4d9031c78aecf256f09b77a482fbe827" exitCode=0 Jan 28 18:51:09 crc kubenswrapper[4767]: I0128 
18:51:09.371826 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-8bggl" event={"ID":"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5","Type":"ContainerDied","Data":"8fe6bb77e3b376735c42a645ecd669af4d9031c78aecf256f09b77a482fbe827"} Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.004175 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.390981 4767 generic.go:334] "Generic (PLEG): container finished" podID="008911ac-269d-47a4-a624-0e789415d794" containerID="9cb422e929b3359f7658bb7c5266ba58cce4381d8d29730cc33c98395f2b24b0" exitCode=0 Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.391059 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-5nj7s" event={"ID":"008911ac-269d-47a4-a624-0e789415d794","Type":"ContainerDied","Data":"9cb422e929b3359f7658bb7c5266ba58cce4381d8d29730cc33c98395f2b24b0"} Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.394086 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"612ac632-407a-435a-8459-1fa31da72808","Type":"ContainerStarted","Data":"2bedadb0f4c0d76329ee47681a409356b21ee37d054426231505994d2b60dd01"} Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.396269 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" event={"ID":"04104678-852d-4c2f-90af-90d8a3a58b6a","Type":"ContainerStarted","Data":"0d6247c793fff300b73634002317cb9fb72e71c6e74865f9b9595317f6939c7d"} Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.397535 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.400923 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b","Type":"ContainerStarted","Data":"30cefcbdd816eae073d6d4942349808c5f7acf03b140d3df61367fe01f29739b"} Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.451421 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" podStartSLOduration=3.451393191 podStartE2EDuration="3.451393191s" podCreationTimestamp="2026-01-28 18:51:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:10.449406809 +0000 UTC m=+1276.413589693" watchObservedRunningTime="2026-01-28 18:51:10.451393191 +0000 UTC m=+1276.415576075" Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.789537 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-8bggl" Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.889090 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-combined-ca-bundle\") pod \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\" (UID: \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\") " Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.889428 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7zn6n\" (UniqueName: \"kubernetes.io/projected/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-kube-api-access-7zn6n\") pod \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\" (UID: \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\") " Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.889493 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-db-sync-config-data\") pod \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\" (UID: \"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5\") " Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.897483 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5" (UID: "b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.899628 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-kube-api-access-7zn6n" (OuterVolumeSpecName: "kube-api-access-7zn6n") pod "b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5" (UID: "b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5"). InnerVolumeSpecName "kube-api-access-7zn6n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:51:10 crc kubenswrapper[4767]: I0128 18:51:10.942473 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5" (UID: "b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.000479 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7zn6n\" (UniqueName: \"kubernetes.io/projected/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-kube-api-access-7zn6n\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.000531 4767 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.001020 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.422684 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b","Type":"ContainerStarted","Data":"a6f6dcaa950f2e48099ebd517c4a2212bd4802cf02f8895d9143224bbe5b0973"} Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.433582 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"612ac632-407a-435a-8459-1fa31da72808","Type":"ContainerStarted","Data":"dfd82978ffb94e1f29503e60421348d9787481df2f831157d0825af92102b39b"} Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.433839 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.433837 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="612ac632-407a-435a-8459-1fa31da72808" containerName="cinder-api-log" containerID="cri-o://2bedadb0f4c0d76329ee47681a409356b21ee37d054426231505994d2b60dd01" gracePeriod=30 Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.433934 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="612ac632-407a-435a-8459-1fa31da72808" containerName="cinder-api" containerID="cri-o://dfd82978ffb94e1f29503e60421348d9787481df2f831157d0825af92102b39b" gracePeriod=30 Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.437916 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f3a17316-3eca-4517-8254-94224f26cf8f","Type":"ContainerStarted","Data":"eb97552d14a30c1a1a8c79c52c695d9b6ade23c23938f659e96abc2f9dc96cdb"} Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.441877 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-8bggl" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.447611 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-8bggl" event={"ID":"b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5","Type":"ContainerDied","Data":"6c2a880bc50a5dfb99aa46d3454600c415046bfef6bd8fba3a5c371c95e4483c"} Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.447687 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c2a880bc50a5dfb99aa46d3454600c415046bfef6bd8fba3a5c371c95e4483c" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.495371 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.489886333 podStartE2EDuration="4.489886333s" podCreationTimestamp="2026-01-28 18:51:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:11.458989348 +0000 UTC m=+1277.423172232" watchObservedRunningTime="2026-01-28 18:51:11.489886333 +0000 UTC m=+1277.454069217" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.742275 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7c6ddf6ccf-ncm6l"] Jan 28 18:51:11 crc kubenswrapper[4767]: E0128 18:51:11.743523 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5" containerName="barbican-db-sync" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.743553 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5" containerName="barbican-db-sync" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.744143 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5" containerName="barbican-db-sync" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.745612 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.765612 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.765792 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-b5nn8" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.765903 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.773314 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7c6ddf6ccf-ncm6l"] Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.826534 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6c5d6855b6-2h826"] Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.832105 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/73ac02aa-a281-41be-8b19-b7171b2d7522-logs\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.832260 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmnxd\" (UniqueName: \"kubernetes.io/projected/73ac02aa-a281-41be-8b19-b7171b2d7522-kube-api-access-wmnxd\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.832311 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73ac02aa-a281-41be-8b19-b7171b2d7522-config-data\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.832438 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73ac02aa-a281-41be-8b19-b7171b2d7522-combined-ca-bundle\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.832483 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/73ac02aa-a281-41be-8b19-b7171b2d7522-config-data-custom\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.833984 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.847401 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.869273 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6c5d6855b6-2h826"] Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.940600 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmnxd\" (UniqueName: \"kubernetes.io/projected/73ac02aa-a281-41be-8b19-b7171b2d7522-kube-api-access-wmnxd\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.940704 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73ac02aa-a281-41be-8b19-b7171b2d7522-config-data\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.940869 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fb4e951-e65c-46da-9fa1-d710799db1ff-combined-ca-bundle\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.940916 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fb4e951-e65c-46da-9fa1-d710799db1ff-logs\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.940988 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84rcb\" (UniqueName: \"kubernetes.io/projected/0fb4e951-e65c-46da-9fa1-d710799db1ff-kube-api-access-84rcb\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.941083 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73ac02aa-a281-41be-8b19-b7171b2d7522-combined-ca-bundle\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.941162 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fb4e951-e65c-46da-9fa1-d710799db1ff-config-data\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.945285 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/73ac02aa-a281-41be-8b19-b7171b2d7522-config-data-custom\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.945517 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fb4e951-e65c-46da-9fa1-d710799db1ff-config-data-custom\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.945578 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/73ac02aa-a281-41be-8b19-b7171b2d7522-logs\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.956325 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/73ac02aa-a281-41be-8b19-b7171b2d7522-logs\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.971941 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/73ac02aa-a281-41be-8b19-b7171b2d7522-config-data\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.986240 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/73ac02aa-a281-41be-8b19-b7171b2d7522-config-data-custom\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:11 crc kubenswrapper[4767]: I0128 18:51:11.993040 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmnxd\" (UniqueName: \"kubernetes.io/projected/73ac02aa-a281-41be-8b19-b7171b2d7522-kube-api-access-wmnxd\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.036544 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/73ac02aa-a281-41be-8b19-b7171b2d7522-combined-ca-bundle\") pod \"barbican-worker-7c6ddf6ccf-ncm6l\" (UID: \"73ac02aa-a281-41be-8b19-b7171b2d7522\") " pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.099990 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d6bd97c5-rb9kc"] Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.103157 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84rcb\" (UniqueName: \"kubernetes.io/projected/0fb4e951-e65c-46da-9fa1-d710799db1ff-kube-api-access-84rcb\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " 
pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.103277 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fb4e951-e65c-46da-9fa1-d710799db1ff-config-data\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.103352 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fb4e951-e65c-46da-9fa1-d710799db1ff-config-data-custom\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.103512 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fb4e951-e65c-46da-9fa1-d710799db1ff-combined-ca-bundle\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.103569 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fb4e951-e65c-46da-9fa1-d710799db1ff-logs\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.104669 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0fb4e951-e65c-46da-9fa1-d710799db1ff-logs\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.110382 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.122289 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0fb4e951-e65c-46da-9fa1-d710799db1ff-combined-ca-bundle\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.125033 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0fb4e951-e65c-46da-9fa1-d710799db1ff-config-data\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.135474 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84rcb\" (UniqueName: \"kubernetes.io/projected/0fb4e951-e65c-46da-9fa1-d710799db1ff-kube-api-access-84rcb\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.139615 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0fb4e951-e65c-46da-9fa1-d710799db1ff-config-data-custom\") pod \"barbican-keystone-listener-6c5d6855b6-2h826\" (UID: \"0fb4e951-e65c-46da-9fa1-d710799db1ff\") " pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.179668 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v"] Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.182267 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.193550 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v"] Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.194035 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.207623 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q7m4s\" (UniqueName: \"kubernetes.io/projected/127f2e41-56a9-49d5-af60-577d5b3d7093-kube-api-access-q7m4s\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.207742 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.207801 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-config\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.207855 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-dns-svc\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.207970 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.208016 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-dns-swift-storage-0\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.225574 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5c9944b8f4-9qvg4"] Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.229914 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.233353 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5c9944b8f4-9qvg4"] Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.234228 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.309409 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.309868 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-dns-swift-storage-0\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.309937 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-config-data-custom\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.309988 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q7m4s\" (UniqueName: \"kubernetes.io/projected/127f2e41-56a9-49d5-af60-577d5b3d7093-kube-api-access-q7m4s\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.310018 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-combined-ca-bundle\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.310061 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2dd67540-b613-4585-b1f8-8a4ba27c3d93-logs\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.310084 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.310125 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-config-data\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: 
\"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.310459 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp8kt\" (UniqueName: \"kubernetes.io/projected/2dd67540-b613-4585-b1f8-8a4ba27c3d93-kube-api-access-fp8kt\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.310586 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-config\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.310772 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-dns-svc\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.311468 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-sb\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.312029 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-dns-svc\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.312815 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-config\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.312931 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-nb\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.313092 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-dns-swift-storage-0\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.336229 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q7m4s\" (UniqueName: \"kubernetes.io/projected/127f2e41-56a9-49d5-af60-577d5b3d7093-kube-api-access-q7m4s\") pod \"dnsmasq-dns-5cc8b5d5c5-x7h6v\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 
18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.343507 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-5nj7s" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.416687 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-config-data\") pod \"008911ac-269d-47a4-a624-0e789415d794\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.416780 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-combined-ca-bundle\") pod \"008911ac-269d-47a4-a624-0e789415d794\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.416848 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/008911ac-269d-47a4-a624-0e789415d794-logs\") pod \"008911ac-269d-47a4-a624-0e789415d794\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.416974 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pdjlw\" (UniqueName: \"kubernetes.io/projected/008911ac-269d-47a4-a624-0e789415d794-kube-api-access-pdjlw\") pod \"008911ac-269d-47a4-a624-0e789415d794\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.417065 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-scripts\") pod \"008911ac-269d-47a4-a624-0e789415d794\" (UID: \"008911ac-269d-47a4-a624-0e789415d794\") " Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.419123 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/008911ac-269d-47a4-a624-0e789415d794-logs" (OuterVolumeSpecName: "logs") pod "008911ac-269d-47a4-a624-0e789415d794" (UID: "008911ac-269d-47a4-a624-0e789415d794"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.420789 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-config-data-custom\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.420848 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-combined-ca-bundle\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.420913 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2dd67540-b613-4585-b1f8-8a4ba27c3d93-logs\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.420976 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-config-data\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.421026 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp8kt\" (UniqueName: \"kubernetes.io/projected/2dd67540-b613-4585-b1f8-8a4ba27c3d93-kube-api-access-fp8kt\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.421177 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/008911ac-269d-47a4-a624-0e789415d794-logs\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.422655 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2dd67540-b613-4585-b1f8-8a4ba27c3d93-logs\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.433765 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-config-data\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.451276 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-config-data-custom\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.492833 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp8kt\" (UniqueName: 
\"kubernetes.io/projected/2dd67540-b613-4585-b1f8-8a4ba27c3d93-kube-api-access-fp8kt\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.499603 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/008911ac-269d-47a4-a624-0e789415d794-kube-api-access-pdjlw" (OuterVolumeSpecName: "kube-api-access-pdjlw") pod "008911ac-269d-47a4-a624-0e789415d794" (UID: "008911ac-269d-47a4-a624-0e789415d794"). InnerVolumeSpecName "kube-api-access-pdjlw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.502412 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-scripts" (OuterVolumeSpecName: "scripts") pod "008911ac-269d-47a4-a624-0e789415d794" (UID: "008911ac-269d-47a4-a624-0e789415d794"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.513568 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-combined-ca-bundle\") pod \"barbican-api-5c9944b8f4-9qvg4\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") " pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.522886 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pdjlw\" (UniqueName: \"kubernetes.io/projected/008911ac-269d-47a4-a624-0e789415d794-kube-api-access-pdjlw\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.522920 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.534590 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-5nj7s" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.535362 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-5nj7s" event={"ID":"008911ac-269d-47a4-a624-0e789415d794","Type":"ContainerDied","Data":"986a1bd0bbfd7170982e95e22bbddcc7c7e5ba5b5f8ec96c86630d514904cf75"} Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.535443 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="986a1bd0bbfd7170982e95e22bbddcc7c7e5ba5b5f8ec96c86630d514904cf75" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.569302 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "008911ac-269d-47a4-a624-0e789415d794" (UID: "008911ac-269d-47a4-a624-0e789415d794"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.594631 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.615102 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.621467 4767 generic.go:334] "Generic (PLEG): container finished" podID="612ac632-407a-435a-8459-1fa31da72808" containerID="dfd82978ffb94e1f29503e60421348d9787481df2f831157d0825af92102b39b" exitCode=0 Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.621513 4767 generic.go:334] "Generic (PLEG): container finished" podID="612ac632-407a-435a-8459-1fa31da72808" containerID="2bedadb0f4c0d76329ee47681a409356b21ee37d054426231505994d2b60dd01" exitCode=143 Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.621582 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"612ac632-407a-435a-8459-1fa31da72808","Type":"ContainerDied","Data":"dfd82978ffb94e1f29503e60421348d9787481df2f831157d0825af92102b39b"} Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.621615 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"612ac632-407a-435a-8459-1fa31da72808","Type":"ContainerDied","Data":"2bedadb0f4c0d76329ee47681a409356b21ee37d054426231505994d2b60dd01"} Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.627475 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-c9f87f88d-pq4n5"] Jan 28 18:51:12 crc kubenswrapper[4767]: E0128 18:51:12.628073 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="008911ac-269d-47a4-a624-0e789415d794" containerName="placement-db-sync" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.628090 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="008911ac-269d-47a4-a624-0e789415d794" containerName="placement-db-sync" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.635624 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="008911ac-269d-47a4-a624-0e789415d794" containerName="placement-db-sync" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.637031 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.637703 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-config-data" (OuterVolumeSpecName: "config-data") pod "008911ac-269d-47a4-a624-0e789415d794" (UID: "008911ac-269d-47a4-a624-0e789415d794"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.649761 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.650057 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.660854 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.661214 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/008911ac-269d-47a4-a624-0e789415d794-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.690757 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c9f87f88d-pq4n5"] Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.724315 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f3a17316-3eca-4517-8254-94224f26cf8f","Type":"ContainerStarted","Data":"c6a690ed99d5f6b40c97054e865df89e3db1b412869542800d352c6d078f78e6"} Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.766444 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-combined-ca-bundle\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.766520 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-public-tls-certs\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.766574 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96jdm\" (UniqueName: \"kubernetes.io/projected/15662321-cd3a-4aa2-9a5c-277fcffc3c79-kube-api-access-96jdm\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.766664 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-scripts\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.766682 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-config-data\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.766764 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-internal-tls-certs\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.766784 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/15662321-cd3a-4aa2-9a5c-277fcffc3c79-logs\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.816280 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.728748653 podStartE2EDuration="5.816249074s" podCreationTimestamp="2026-01-28 18:51:07 +0000 UTC" firstStartedPulling="2026-01-28 18:51:08.558179668 +0000 UTC m=+1274.522362542" lastFinishedPulling="2026-01-28 18:51:09.645680089 +0000 UTC m=+1275.609862963" observedRunningTime="2026-01-28 18:51:12.791742969 +0000 UTC m=+1278.755925853" watchObservedRunningTime="2026-01-28 18:51:12.816249074 +0000 UTC m=+1278.780431948" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.873680 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-combined-ca-bundle\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.873828 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-public-tls-certs\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.873904 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96jdm\" (UniqueName: \"kubernetes.io/projected/15662321-cd3a-4aa2-9a5c-277fcffc3c79-kube-api-access-96jdm\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.874073 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-scripts\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.874127 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-config-data\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.874308 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-internal-tls-certs\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 
Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.874355 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/15662321-cd3a-4aa2-9a5c-277fcffc3c79-logs\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5"
Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.886400 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/15662321-cd3a-4aa2-9a5c-277fcffc3c79-logs\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5"
Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.912321 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-combined-ca-bundle\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5"
Jan 28 18:51:12 crc kubenswrapper[4767]: I0128 18:51:12.983352 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.158488 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6c5d6855b6-2h826"]
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.181399 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7c6ddf6ccf-ncm6l"]
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.302774 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-scripts\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5"
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.312364 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96jdm\" (UniqueName: \"kubernetes.io/projected/15662321-cd3a-4aa2-9a5c-277fcffc3c79-kube-api-access-96jdm\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5"
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.317616 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-public-tls-certs\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5"
Jan 28 18:51:13 crc kubenswrapper[4767]: W0128 18:51:13.332451 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0fb4e951_e65c_46da_9fa1_d710799db1ff.slice/crio-6d91ef25aaafd83a3667283209eeabf8213b88a832933bf265794cd08abbbc2f WatchSource:0}: Error finding container 6d91ef25aaafd83a3667283209eeabf8213b88a832933bf265794cd08abbbc2f: Status 404 returned error can't find the container with id 6d91ef25aaafd83a3667283209eeabf8213b88a832933bf265794cd08abbbc2f
Jan 28 18:51:13 crc kubenswrapper[4767]: W0128 18:51:13.335793 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod73ac02aa_a281_41be_8b19_b7171b2d7522.slice/crio-57175fbdcac61e2c126ab519e26e2640770376d54ff8c035e0a5c8876a511e21 WatchSource:0}: Error finding container 57175fbdcac61e2c126ab519e26e2640770376d54ff8c035e0a5c8876a511e21: Status 404 returned error can't find the container with id 57175fbdcac61e2c126ab519e26e2640770376d54ff8c035e0a5c8876a511e21
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.470302 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.549493 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v"]
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.601913 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxjs4\" (UniqueName: \"kubernetes.io/projected/612ac632-407a-435a-8459-1fa31da72808-kube-api-access-zxjs4\") pod \"612ac632-407a-435a-8459-1fa31da72808\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") "
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.602076 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-config-data-custom\") pod \"612ac632-407a-435a-8459-1fa31da72808\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") "
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.602171 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/612ac632-407a-435a-8459-1fa31da72808-logs\") pod \"612ac632-407a-435a-8459-1fa31da72808\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") "
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.602269 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-config-data\") pod \"612ac632-407a-435a-8459-1fa31da72808\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") "
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.602399 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-scripts\") pod \"612ac632-407a-435a-8459-1fa31da72808\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") "
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.602508 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/612ac632-407a-435a-8459-1fa31da72808-etc-machine-id\") pod \"612ac632-407a-435a-8459-1fa31da72808\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") "
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.602550 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-combined-ca-bundle\") pod \"612ac632-407a-435a-8459-1fa31da72808\" (UID: \"612ac632-407a-435a-8459-1fa31da72808\") "
Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.603172 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/612ac632-407a-435a-8459-1fa31da72808-logs" (OuterVolumeSpecName: "logs") pod "612ac632-407a-435a-8459-1fa31da72808" (UID: "612ac632-407a-435a-8459-1fa31da72808"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
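The two manager.go:1169 warnings above are cAdvisor racing the runtime: a crio-<id> cgroup appears in the watch stream, but the CRI-O lookup returns Status 404 because the container is not (or no longer) registered at that instant. Both IDs show up moments later in ContainerStarted events, so these are transient. A rough filter for such records, assuming the crio-<64-hex-id> cgroup naming seen here:

import re
import sys

# Surface cAdvisor watch events that 404ed against the runtime.
WATCH_404 = re.compile(r'crio-(?P<cid>[0-9a-f]{64}).*Status 404')

for line in sys.stdin:
    if 'Failed to process watch event' in line and (m := WATCH_404.search(line)):
        print('watch event raced with CRI-O lookup:', m['cid'][:12])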
InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.603336 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/612ac632-407a-435a-8459-1fa31da72808-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "612ac632-407a-435a-8459-1fa31da72808" (UID: "612ac632-407a-435a-8459-1fa31da72808"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.609258 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "612ac632-407a-435a-8459-1fa31da72808" (UID: "612ac632-407a-435a-8459-1fa31da72808"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.612359 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-internal-tls-certs\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.613316 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/15662321-cd3a-4aa2-9a5c-277fcffc3c79-config-data\") pod \"placement-c9f87f88d-pq4n5\" (UID: \"15662321-cd3a-4aa2-9a5c-277fcffc3c79\") " pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.634100 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/612ac632-407a-435a-8459-1fa31da72808-kube-api-access-zxjs4" (OuterVolumeSpecName: "kube-api-access-zxjs4") pod "612ac632-407a-435a-8459-1fa31da72808" (UID: "612ac632-407a-435a-8459-1fa31da72808"). InnerVolumeSpecName "kube-api-access-zxjs4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.634717 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-scripts" (OuterVolumeSpecName: "scripts") pod "612ac632-407a-435a-8459-1fa31da72808" (UID: "612ac632-407a-435a-8459-1fa31da72808"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.648720 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.649336 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5c9944b8f4-9qvg4"] Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.674592 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "612ac632-407a-435a-8459-1fa31da72808" (UID: "612ac632-407a-435a-8459-1fa31da72808"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.713442 4767 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.713762 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/612ac632-407a-435a-8459-1fa31da72808-logs\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.713857 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.713937 4767 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/612ac632-407a-435a-8459-1fa31da72808-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.714046 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.714156 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxjs4\" (UniqueName: \"kubernetes.io/projected/612ac632-407a-435a-8459-1fa31da72808-kube-api-access-zxjs4\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.736814 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-config-data" (OuterVolumeSpecName: "config-data") pod "612ac632-407a-435a-8459-1fa31da72808" (UID: "612ac632-407a-435a-8459-1fa31da72808"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.759122 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" event={"ID":"127f2e41-56a9-49d5-af60-577d5b3d7093","Type":"ContainerStarted","Data":"c1bc259d86054336aa2772a76a0d32472697438274764c30169cf6d97ee66d3b"} Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.765539 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" event={"ID":"0fb4e951-e65c-46da-9fa1-d710799db1ff","Type":"ContainerStarted","Data":"6d91ef25aaafd83a3667283209eeabf8213b88a832933bf265794cd08abbbc2f"} Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.771344 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c9944b8f4-9qvg4" event={"ID":"2dd67540-b613-4585-b1f8-8a4ba27c3d93","Type":"ContainerStarted","Data":"48a01ca9465fec6b3a35587997622238cfe9ae1c5f534806077914e394d931ec"} Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.786317 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"612ac632-407a-435a-8459-1fa31da72808","Type":"ContainerDied","Data":"0e8416c35cf29baf699b7358a885f49fc06d7f58908196cf437696b93551d5fb"} Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.786657 4767 scope.go:117] "RemoveContainer" containerID="dfd82978ffb94e1f29503e60421348d9787481df2f831157d0825af92102b39b" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.786942 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.802693 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" event={"ID":"73ac02aa-a281-41be-8b19-b7171b2d7522","Type":"ContainerStarted","Data":"57175fbdcac61e2c126ab519e26e2640770376d54ff8c035e0a5c8876a511e21"} Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.805458 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" podUID="04104678-852d-4c2f-90af-90d8a3a58b6a" containerName="dnsmasq-dns" containerID="cri-o://0d6247c793fff300b73634002317cb9fb72e71c6e74865f9b9595317f6939c7d" gracePeriod=10 Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.832646 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/612ac632-407a-435a-8459-1fa31da72808-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.898523 4767 scope.go:117] "RemoveContainer" containerID="2bedadb0f4c0d76329ee47681a409356b21ee37d054426231505994d2b60dd01" Jan 28 18:51:13 crc kubenswrapper[4767]: I0128 18:51:13.954002 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.063153 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.127506 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 28 18:51:14 crc kubenswrapper[4767]: E0128 18:51:14.128575 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="612ac632-407a-435a-8459-1fa31da72808" containerName="cinder-api-log" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.128610 4767 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="612ac632-407a-435a-8459-1fa31da72808" containerName="cinder-api-log" Jan 28 18:51:14 crc kubenswrapper[4767]: E0128 18:51:14.128659 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="612ac632-407a-435a-8459-1fa31da72808" containerName="cinder-api" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.128667 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="612ac632-407a-435a-8459-1fa31da72808" containerName="cinder-api" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.129020 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="612ac632-407a-435a-8459-1fa31da72808" containerName="cinder-api" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.129072 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="612ac632-407a-435a-8459-1fa31da72808" containerName="cinder-api-log" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.130729 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.134217 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.134234 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.136647 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.152165 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.162474 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-c9f87f88d-pq4n5"] Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.293694 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-config-data-custom\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.294245 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbpjw\" (UniqueName: \"kubernetes.io/projected/f6eb637e-af35-4164-bf33-abd4c7049906-kube-api-access-jbpjw\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.294461 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-scripts\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.294631 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f6eb637e-af35-4164-bf33-abd4c7049906-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.294764 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.294929 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-public-tls-certs\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.295130 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6eb637e-af35-4164-bf33-abd4c7049906-logs\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.295362 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-config-data\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.295443 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.397989 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbpjw\" (UniqueName: \"kubernetes.io/projected/f6eb637e-af35-4164-bf33-abd4c7049906-kube-api-access-jbpjw\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.398069 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-scripts\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.398128 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f6eb637e-af35-4164-bf33-abd4c7049906-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.398156 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.398185 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-public-tls-certs\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.398401 4767 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6eb637e-af35-4164-bf33-abd4c7049906-logs\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.398441 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-config-data\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.398482 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.398623 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-config-data-custom\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.402193 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f6eb637e-af35-4164-bf33-abd4c7049906-etc-machine-id\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.403549 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f6eb637e-af35-4164-bf33-abd4c7049906-logs\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.416944 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-config-data\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.417818 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.418734 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-scripts\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.418902 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-public-tls-certs\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.420064 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-config-data-custom\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.444086 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbpjw\" (UniqueName: \"kubernetes.io/projected/f6eb637e-af35-4164-bf33-abd4c7049906-kube-api-access-jbpjw\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.444197 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f6eb637e-af35-4164-bf33-abd4c7049906-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"f6eb637e-af35-4164-bf33-abd4c7049906\") " pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.495283 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.840850 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="612ac632-407a-435a-8459-1fa31da72808" path="/var/lib/kubelet/pods/612ac632-407a-435a-8459-1fa31da72808/volumes" Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.847364 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" event={"ID":"127f2e41-56a9-49d5-af60-577d5b3d7093","Type":"ContainerDied","Data":"cf22ce5e7df7fc3eaadf896cedc0c1799cfff94aed2f7c3bda1986534359f511"} Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.845569 4767 generic.go:334] "Generic (PLEG): container finished" podID="127f2e41-56a9-49d5-af60-577d5b3d7093" containerID="cf22ce5e7df7fc3eaadf896cedc0c1799cfff94aed2f7c3bda1986534359f511" exitCode=0 Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.874168 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c9944b8f4-9qvg4" event={"ID":"2dd67540-b613-4585-b1f8-8a4ba27c3d93","Type":"ContainerStarted","Data":"54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792"} Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.883051 4767 generic.go:334] "Generic (PLEG): container finished" podID="04104678-852d-4c2f-90af-90d8a3a58b6a" containerID="0d6247c793fff300b73634002317cb9fb72e71c6e74865f9b9595317f6939c7d" exitCode=0 Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.883148 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" event={"ID":"04104678-852d-4c2f-90af-90d8a3a58b6a","Type":"ContainerDied","Data":"0d6247c793fff300b73634002317cb9fb72e71c6e74865f9b9595317f6939c7d"} Jan 28 18:51:14 crc kubenswrapper[4767]: I0128 18:51:14.889750 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c9f87f88d-pq4n5" event={"ID":"15662321-cd3a-4aa2-9a5c-277fcffc3c79","Type":"ContainerStarted","Data":"993f10adf474d7b6c6e04fc81615df646f677dd4548b39e2d050e22deca06c3c"} Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.151776 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 28 18:51:15 crc kubenswrapper[4767]: W0128 18:51:15.176371 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6eb637e_af35_4164_bf33_abd4c7049906.slice/crio-4f610597ddc83686a53fbcdadcfda232047591ccd060940e17cec914260dd745 WatchSource:0}: 
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.233009 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc"
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.326438 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-svc\") pod \"04104678-852d-4c2f-90af-90d8a3a58b6a\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") "
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.326567 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-swift-storage-0\") pod \"04104678-852d-4c2f-90af-90d8a3a58b6a\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") "
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.326654 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vxpz9\" (UniqueName: \"kubernetes.io/projected/04104678-852d-4c2f-90af-90d8a3a58b6a-kube-api-access-vxpz9\") pod \"04104678-852d-4c2f-90af-90d8a3a58b6a\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") "
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.326687 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-ovsdbserver-nb\") pod \"04104678-852d-4c2f-90af-90d8a3a58b6a\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") "
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.326791 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-ovsdbserver-sb\") pod \"04104678-852d-4c2f-90af-90d8a3a58b6a\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") "
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.326951 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-config\") pod \"04104678-852d-4c2f-90af-90d8a3a58b6a\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") "
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.358733 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04104678-852d-4c2f-90af-90d8a3a58b6a-kube-api-access-vxpz9" (OuterVolumeSpecName: "kube-api-access-vxpz9") pod "04104678-852d-4c2f-90af-90d8a3a58b6a" (UID: "04104678-852d-4c2f-90af-90d8a3a58b6a"). InnerVolumeSpecName "kube-api-access-vxpz9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.407597 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "04104678-852d-4c2f-90af-90d8a3a58b6a" (UID: "04104678-852d-4c2f-90af-90d8a3a58b6a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.414039 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "04104678-852d-4c2f-90af-90d8a3a58b6a" (UID: "04104678-852d-4c2f-90af-90d8a3a58b6a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.421501 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "04104678-852d-4c2f-90af-90d8a3a58b6a" (UID: "04104678-852d-4c2f-90af-90d8a3a58b6a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.422993 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-config" (OuterVolumeSpecName: "config") pod "04104678-852d-4c2f-90af-90d8a3a58b6a" (UID: "04104678-852d-4c2f-90af-90d8a3a58b6a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.434079 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "04104678-852d-4c2f-90af-90d8a3a58b6a" (UID: "04104678-852d-4c2f-90af-90d8a3a58b6a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.434300 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-svc\") pod \"04104678-852d-4c2f-90af-90d8a3a58b6a\" (UID: \"04104678-852d-4c2f-90af-90d8a3a58b6a\") "
Jan 28 18:51:15 crc kubenswrapper[4767]: W0128 18:51:15.434420 4767 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/04104678-852d-4c2f-90af-90d8a3a58b6a/volumes/kubernetes.io~configmap/dns-svc
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.434435 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "04104678-852d-4c2f-90af-90d8a3a58b6a" (UID: "04104678-852d-4c2f-90af-90d8a3a58b6a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.434723 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.434738 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-config\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.434749 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.434758 4767 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.434768 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vxpz9\" (UniqueName: \"kubernetes.io/projected/04104678-852d-4c2f-90af-90d8a3a58b6a-kube-api-access-vxpz9\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.434778 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/04104678-852d-4c2f-90af-90d8a3a58b6a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.718410 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-df7856cf4-bs48j"]
Jan 28 18:51:15 crc kubenswrapper[4767]: E0128 18:51:15.718951 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04104678-852d-4c2f-90af-90d8a3a58b6a" containerName="init"
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.718970 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="04104678-852d-4c2f-90af-90d8a3a58b6a" containerName="init"
Jan 28 18:51:15 crc kubenswrapper[4767]: E0128 18:51:15.718997 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04104678-852d-4c2f-90af-90d8a3a58b6a" containerName="dnsmasq-dns"
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.719003 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="04104678-852d-4c2f-90af-90d8a3a58b6a" containerName="dnsmasq-dns"
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.719185 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="04104678-852d-4c2f-90af-90d8a3a58b6a" containerName="dnsmasq-dns"
Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.720242 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-df7856cf4-bs48j"
Need to start a new one" pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.727940 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.730307 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.748611 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-df7856cf4-bs48j"] Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.873682 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sk4xh\" (UniqueName: \"kubernetes.io/projected/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-kube-api-access-sk4xh\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.874694 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-logs\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.875241 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-config-data\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.875355 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-config-data-custom\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.875481 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-combined-ca-bundle\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.875566 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-internal-tls-certs\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.875716 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-public-tls-certs\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.909034 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-api-5c9944b8f4-9qvg4" event={"ID":"2dd67540-b613-4585-b1f8-8a4ba27c3d93","Type":"ContainerStarted","Data":"190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10"} Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.909196 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.912358 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" event={"ID":"04104678-852d-4c2f-90af-90d8a3a58b6a","Type":"ContainerDied","Data":"53f1e11487a2e2410e0dae61c3679a4343097c14f04c0a774b8a631bc76d4793"} Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.912471 4767 scope.go:117] "RemoveContainer" containerID="0d6247c793fff300b73634002317cb9fb72e71c6e74865f9b9595317f6939c7d" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.912610 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d6bd97c5-rb9kc" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.916144 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f6eb637e-af35-4164-bf33-abd4c7049906","Type":"ContainerStarted","Data":"4f610597ddc83686a53fbcdadcfda232047591ccd060940e17cec914260dd745"} Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.925417 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c9f87f88d-pq4n5" event={"ID":"15662321-cd3a-4aa2-9a5c-277fcffc3c79","Type":"ContainerStarted","Data":"70270552d4a10f58264c561103711e893e74e52654cbe53531260a78e17eb724"} Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.939955 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5c9944b8f4-9qvg4" podStartSLOduration=3.939918884 podStartE2EDuration="3.939918884s" podCreationTimestamp="2026-01-28 18:51:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:15.930276982 +0000 UTC m=+1281.894459876" watchObservedRunningTime="2026-01-28 18:51:15.939918884 +0000 UTC m=+1281.904101758" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.940850 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b","Type":"ContainerStarted","Data":"55fb81d295ed99b54e1f16699abf14b85bc06236365f68f6d01658b57027ba48"} Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.941099 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="proxy-httpd" containerID="cri-o://55fb81d295ed99b54e1f16699abf14b85bc06236365f68f6d01658b57027ba48" gracePeriod=30 Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.941115 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.941123 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="sg-core" containerID="cri-o://a6f6dcaa950f2e48099ebd517c4a2212bd4802cf02f8895d9143224bbe5b0973" gracePeriod=30 Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.941131 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="ceilometer-notification-agent" containerID="cri-o://30cefcbdd816eae073d6d4942349808c5f7acf03b140d3df61367fe01f29739b" gracePeriod=30 Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.941300 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="ceilometer-central-agent" containerID="cri-o://684c67e751eddcda3281a8a2c54f6de29cb59043e3b20439004bc668caa20195" gracePeriod=30 Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.947621 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" event={"ID":"127f2e41-56a9-49d5-af60-577d5b3d7093","Type":"ContainerStarted","Data":"50e5b7616f3f00ab5c90b6b604f9b08b8af456577f56689e6606244b7e8f5f0b"} Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.948697 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.977802 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-config-data\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.977904 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-config-data-custom\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.977988 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-combined-ca-bundle\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.978074 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-internal-tls-certs\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.978186 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-public-tls-certs\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.978250 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sk4xh\" (UniqueName: \"kubernetes.io/projected/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-kube-api-access-sk4xh\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.978288 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" 
(UniqueName: \"kubernetes.io/empty-dir/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-logs\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.979945 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-logs\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.989575 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-combined-ca-bundle\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.990736 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5d6bd97c5-rb9kc"] Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.992522 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-config-data-custom\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.993664 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-public-tls-certs\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:15 crc kubenswrapper[4767]: I0128 18:51:15.995862 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-internal-tls-certs\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.001017 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5d6bd97c5-rb9kc"] Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.002401 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-config-data\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.004950 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sk4xh\" (UniqueName: \"kubernetes.io/projected/eaabcf8d-9a71-4e3e-91d2-1360f1f16b81-kube-api-access-sk4xh\") pod \"barbican-api-df7856cf4-bs48j\" (UID: \"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81\") " pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.026354 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.800233316 podStartE2EDuration="10.026307931s" podCreationTimestamp="2026-01-28 18:51:06 +0000 UTC" firstStartedPulling="2026-01-28 18:51:07.40388755 +0000 
UTC m=+1273.368070424" lastFinishedPulling="2026-01-28 18:51:14.629962165 +0000 UTC m=+1280.594145039" observedRunningTime="2026-01-28 18:51:16.010946512 +0000 UTC m=+1281.975129396" watchObservedRunningTime="2026-01-28 18:51:16.026307931 +0000 UTC m=+1281.990490805" Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.056065 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.810571 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="04104678-852d-4c2f-90af-90d8a3a58b6a" path="/var/lib/kubelet/pods/04104678-852d-4c2f-90af-90d8a3a58b6a/volumes" Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.960518 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"f6eb637e-af35-4164-bf33-abd4c7049906","Type":"ContainerStarted","Data":"3a2ad0ff030234dc69eb6e625023512dafd9192f7d829cf9c4815aa256fbfbad"} Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.965581 4767 generic.go:334] "Generic (PLEG): container finished" podID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerID="55fb81d295ed99b54e1f16699abf14b85bc06236365f68f6d01658b57027ba48" exitCode=0 Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.965624 4767 generic.go:334] "Generic (PLEG): container finished" podID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerID="a6f6dcaa950f2e48099ebd517c4a2212bd4802cf02f8895d9143224bbe5b0973" exitCode=2 Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.965634 4767 generic.go:334] "Generic (PLEG): container finished" podID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerID="30cefcbdd816eae073d6d4942349808c5f7acf03b140d3df61367fe01f29739b" exitCode=0 Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.965647 4767 generic.go:334] "Generic (PLEG): container finished" podID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerID="684c67e751eddcda3281a8a2c54f6de29cb59043e3b20439004bc668caa20195" exitCode=0 Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.965655 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b","Type":"ContainerDied","Data":"55fb81d295ed99b54e1f16699abf14b85bc06236365f68f6d01658b57027ba48"} Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.965861 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b","Type":"ContainerDied","Data":"a6f6dcaa950f2e48099ebd517c4a2212bd4802cf02f8895d9143224bbe5b0973"} Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.965876 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b","Type":"ContainerDied","Data":"30cefcbdd816eae073d6d4942349808c5f7acf03b140d3df61367fe01f29739b"} Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.965888 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b","Type":"ContainerDied","Data":"684c67e751eddcda3281a8a2c54f6de29cb59043e3b20439004bc668caa20195"} Jan 28 18:51:16 crc kubenswrapper[4767]: I0128 18:51:16.966158 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5c9944b8f4-9qvg4" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.117092 4767 scope.go:117] "RemoveContainer" 
containerID="3780ed617ed6de3b751e83a24cc63869d372a4005e88f2cb4fc58e99c7e25017" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.479263 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.546195 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" podStartSLOduration=6.546166095 podStartE2EDuration="6.546166095s" podCreationTimestamp="2026-01-28 18:51:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:16.063872315 +0000 UTC m=+1282.028055199" watchObservedRunningTime="2026-01-28 18:51:17.546166095 +0000 UTC m=+1283.510348969" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.636101 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-sg-core-conf-yaml\") pod \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.636254 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-scripts\") pod \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.636353 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-run-httpd\") pod \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.636690 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddmmf\" (UniqueName: \"kubernetes.io/projected/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-kube-api-access-ddmmf\") pod \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.636745 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-config-data\") pod \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.636801 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-combined-ca-bundle\") pod \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.636859 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-log-httpd\") pod \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\" (UID: \"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b\") " Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.638003 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod 
"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" (UID: "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.638497 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.638948 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" (UID: "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.649547 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-kube-api-access-ddmmf" (OuterVolumeSpecName: "kube-api-access-ddmmf") pod "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" (UID: "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b"). InnerVolumeSpecName "kube-api-access-ddmmf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.660861 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-scripts" (OuterVolumeSpecName: "scripts") pod "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" (UID: "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.742566 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ddmmf\" (UniqueName: \"kubernetes.io/projected/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-kube-api-access-ddmmf\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.743141 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.743164 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.757471 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" (UID: "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.838145 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" (UID: "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.846659 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.846697 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.870693 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-df7856cf4-bs48j"] Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.897560 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-config-data" (OuterVolumeSpecName: "config-data") pod "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" (UID: "7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:17 crc kubenswrapper[4767]: W0128 18:51:17.902468 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podeaabcf8d_9a71_4e3e_91d2_1360f1f16b81.slice/crio-47ee140f81c2c943a3a7acbb4b4e5ceef8b04291459d9e3aeb463047f7c52f3f WatchSource:0}: Error finding container 47ee140f81c2c943a3a7acbb4b4e5ceef8b04291459d9e3aeb463047f7c52f3f: Status 404 returned error can't find the container with id 47ee140f81c2c943a3a7acbb4b4e5ceef8b04291459d9e3aeb463047f7c52f3f Jan 28 18:51:17 crc kubenswrapper[4767]: I0128 18:51:17.959085 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.071825 4767 generic.go:334] "Generic (PLEG): container finished" podID="b956870a-eae0-48fb-8e4f-182a9f276308" containerID="5694634af50740938137546ad08c9781975843d69d312ad15e40d7db6424d4a0" exitCode=0 Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.071992 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-5ptf6" event={"ID":"b956870a-eae0-48fb-8e4f-182a9f276308","Type":"ContainerDied","Data":"5694634af50740938137546ad08c9781975843d69d312ad15e40d7db6424d4a0"} Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.126493 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b","Type":"ContainerDied","Data":"2802f42e98fadbb8aaf33cbd373213e299ec3d79ee26d430f41956713c89b5ff"} Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.126577 4767 scope.go:117] "RemoveContainer" containerID="55fb81d295ed99b54e1f16699abf14b85bc06236365f68f6d01658b57027ba48" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.126801 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.140493 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" event={"ID":"0fb4e951-e65c-46da-9fa1-d710799db1ff","Type":"ContainerStarted","Data":"7a50bb326992f0d8eb67a68879c6cfcd87495030fe1660dd8d8ac734b3df8bcd"} Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.149884 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-df7856cf4-bs48j" event={"ID":"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81","Type":"ContainerStarted","Data":"47ee140f81c2c943a3a7acbb4b4e5ceef8b04291459d9e3aeb463047f7c52f3f"} Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.155778 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-c9f87f88d-pq4n5" event={"ID":"15662321-cd3a-4aa2-9a5c-277fcffc3c79","Type":"ContainerStarted","Data":"5e315c254f3ddec4e3b5df9cd4a6b14db11e59c6ab342e5326380d26610378e9"} Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.156435 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.157246 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-c9f87f88d-pq4n5" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.161408 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" event={"ID":"73ac02aa-a281-41be-8b19-b7171b2d7522","Type":"ContainerStarted","Data":"262a80b1fd01c919bf2eababd12d9a2a4db36f08537291ec2f7b198783e8210e"} Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.191169 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-c9f87f88d-pq4n5" podStartSLOduration=6.191137038 podStartE2EDuration="6.191137038s" podCreationTimestamp="2026-01-28 18:51:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:18.181553338 +0000 UTC m=+1284.145736222" watchObservedRunningTime="2026-01-28 18:51:18.191137038 +0000 UTC m=+1284.155319912" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.192631 4767 scope.go:117] "RemoveContainer" containerID="a6f6dcaa950f2e48099ebd517c4a2212bd4802cf02f8895d9143224bbe5b0973" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.243652 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.246415 4767 scope.go:117] "RemoveContainer" containerID="30cefcbdd816eae073d6d4942349808c5f7acf03b140d3df61367fe01f29739b" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.262859 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.295129 4767 scope.go:117] "RemoveContainer" containerID="684c67e751eddcda3281a8a2c54f6de29cb59043e3b20439004bc668caa20195" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.295351 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:18 crc kubenswrapper[4767]: E0128 18:51:18.296640 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="ceilometer-central-agent" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.296669 4767 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="ceilometer-central-agent" Jan 28 18:51:18 crc kubenswrapper[4767]: E0128 18:51:18.296708 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="ceilometer-notification-agent" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.296719 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="ceilometer-notification-agent" Jan 28 18:51:18 crc kubenswrapper[4767]: E0128 18:51:18.296736 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="sg-core" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.296745 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="sg-core" Jan 28 18:51:18 crc kubenswrapper[4767]: E0128 18:51:18.296754 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="proxy-httpd" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.296763 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="proxy-httpd" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.297047 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="ceilometer-central-agent" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.297087 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="ceilometer-notification-agent" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.297109 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="proxy-httpd" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.297122 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" containerName="sg-core" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.305055 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.307960 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.308251 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.318876 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.405653 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.471593 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.471713 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2d2gn\" (UniqueName: \"kubernetes.io/projected/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-kube-api-access-2d2gn\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.471772 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-log-httpd\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.471849 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-config-data\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.471899 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.471924 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-scripts\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.471998 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-run-httpd\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.489843 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.574747 4767 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.574828 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-scripts\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.574930 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-run-httpd\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.575026 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.575080 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2d2gn\" (UniqueName: \"kubernetes.io/projected/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-kube-api-access-2d2gn\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.575161 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-log-httpd\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.575192 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-config-data\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.576737 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-run-httpd\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.576766 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-log-httpd\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.581560 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-scripts\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.582904 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.588841 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.592844 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-config-data\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.605357 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2d2gn\" (UniqueName: \"kubernetes.io/projected/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-kube-api-access-2d2gn\") pod \"ceilometer-0\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") " pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.640502 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:51:18 crc kubenswrapper[4767]: I0128 18:51:18.829717 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b" path="/var/lib/kubelet/pods/7c2f0944-d09c-4e0a-8bf9-c63ff9598e1b/volumes" Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.206957 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" event={"ID":"73ac02aa-a281-41be-8b19-b7171b2d7522","Type":"ContainerStarted","Data":"b21c59bc255159e71c26ff14486eb1b484dfef76d70d783abed37128ce23e38d"} Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.225685 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" event={"ID":"0fb4e951-e65c-46da-9fa1-d710799db1ff","Type":"ContainerStarted","Data":"2b03da36fe27bb598f00267d274560cac49032fe84a460dfc96c94131cbc7c82"} Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.238313 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7c6ddf6ccf-ncm6l" podStartSLOduration=4.606038327 podStartE2EDuration="8.238290399s" podCreationTimestamp="2026-01-28 18:51:11 +0000 UTC" firstStartedPulling="2026-01-28 18:51:13.595329464 +0000 UTC m=+1279.559512338" lastFinishedPulling="2026-01-28 18:51:17.227581536 +0000 UTC m=+1283.191764410" observedRunningTime="2026-01-28 18:51:19.228026228 +0000 UTC m=+1285.192209102" watchObservedRunningTime="2026-01-28 18:51:19.238290399 +0000 UTC m=+1285.202473273" Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.240364 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-df7856cf4-bs48j" event={"ID":"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81","Type":"ContainerStarted","Data":"52b2b1dc8959442fb7cefa3e3b53cba08e5a41ffac94805b070236b7056e9d25"} Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.257170 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" 
event={"ID":"f6eb637e-af35-4164-bf33-abd4c7049906","Type":"ContainerStarted","Data":"d94006dea80f852af2abc46c8a32a7c498c4cd8c772af7b6fbc81b87c7939b59"} Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.257197 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="f3a17316-3eca-4517-8254-94224f26cf8f" containerName="cinder-scheduler" containerID="cri-o://eb97552d14a30c1a1a8c79c52c695d9b6ade23c23938f659e96abc2f9dc96cdb" gracePeriod=30 Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.257980 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.257973 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="f3a17316-3eca-4517-8254-94224f26cf8f" containerName="probe" containerID="cri-o://c6a690ed99d5f6b40c97054e865df89e3db1b412869542800d352c6d078f78e6" gracePeriod=30 Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.261814 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6c5d6855b6-2h826" podStartSLOduration=4.665029838 podStartE2EDuration="8.261784453s" podCreationTimestamp="2026-01-28 18:51:11 +0000 UTC" firstStartedPulling="2026-01-28 18:51:13.589996547 +0000 UTC m=+1279.554179421" lastFinishedPulling="2026-01-28 18:51:17.186751152 +0000 UTC m=+1283.150934036" observedRunningTime="2026-01-28 18:51:19.258303145 +0000 UTC m=+1285.222486019" watchObservedRunningTime="2026-01-28 18:51:19.261784453 +0000 UTC m=+1285.225967327" Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.312115 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.312090954 podStartE2EDuration="6.312090954s" podCreationTimestamp="2026-01-28 18:51:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:19.300619586 +0000 UTC m=+1285.264802470" watchObservedRunningTime="2026-01-28 18:51:19.312090954 +0000 UTC m=+1285.276273828" Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.365911 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:19 crc kubenswrapper[4767]: W0128 18:51:19.376387 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09ee5dc4_06f4_4231_b3fd_06d0ce50b467.slice/crio-ca2dd5a401b635b162d5ba879475b7a225842de39753240a4b04770cb8cd562e WatchSource:0}: Error finding container ca2dd5a401b635b162d5ba879475b7a225842de39753240a4b04770cb8cd562e: Status 404 returned error can't find the container with id ca2dd5a401b635b162d5ba879475b7a225842de39753240a4b04770cb8cd562e Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.756175 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-db-sync-5ptf6" Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.904026 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-58b8dff7b-297q9" Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.923026 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xq4mq\" (UniqueName: \"kubernetes.io/projected/b956870a-eae0-48fb-8e4f-182a9f276308-kube-api-access-xq4mq\") pod \"b956870a-eae0-48fb-8e4f-182a9f276308\" (UID: \"b956870a-eae0-48fb-8e4f-182a9f276308\") " Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.923094 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b956870a-eae0-48fb-8e4f-182a9f276308-config-data\") pod \"b956870a-eae0-48fb-8e4f-182a9f276308\" (UID: \"b956870a-eae0-48fb-8e4f-182a9f276308\") " Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.923221 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b956870a-eae0-48fb-8e4f-182a9f276308-combined-ca-bundle\") pod \"b956870a-eae0-48fb-8e4f-182a9f276308\" (UID: \"b956870a-eae0-48fb-8e4f-182a9f276308\") " Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.945554 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b956870a-eae0-48fb-8e4f-182a9f276308-kube-api-access-xq4mq" (OuterVolumeSpecName: "kube-api-access-xq4mq") pod "b956870a-eae0-48fb-8e4f-182a9f276308" (UID: "b956870a-eae0-48fb-8e4f-182a9f276308"). InnerVolumeSpecName "kube-api-access-xq4mq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:51:19 crc kubenswrapper[4767]: I0128 18:51:19.974532 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b956870a-eae0-48fb-8e4f-182a9f276308-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b956870a-eae0-48fb-8e4f-182a9f276308" (UID: "b956870a-eae0-48fb-8e4f-182a9f276308"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:20 crc kubenswrapper[4767]: I0128 18:51:20.028050 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xq4mq\" (UniqueName: \"kubernetes.io/projected/b956870a-eae0-48fb-8e4f-182a9f276308-kube-api-access-xq4mq\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:20 crc kubenswrapper[4767]: I0128 18:51:20.028099 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b956870a-eae0-48fb-8e4f-182a9f276308-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:20 crc kubenswrapper[4767]: I0128 18:51:20.081551 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b956870a-eae0-48fb-8e4f-182a9f276308-config-data" (OuterVolumeSpecName: "config-data") pod "b956870a-eae0-48fb-8e4f-182a9f276308" (UID: "b956870a-eae0-48fb-8e4f-182a9f276308"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:20 crc kubenswrapper[4767]: I0128 18:51:20.132150 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b956870a-eae0-48fb-8e4f-182a9f276308-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:20 crc kubenswrapper[4767]: I0128 18:51:20.274601 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-df7856cf4-bs48j" event={"ID":"eaabcf8d-9a71-4e3e-91d2-1360f1f16b81","Type":"ContainerStarted","Data":"0801807dbbf752ad1c0cb21dfe2a4f7ca5505efc6220cf1c4bc5ae55d1983c75"} Jan 28 18:51:20 crc kubenswrapper[4767]: I0128 18:51:20.285310 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09ee5dc4-06f4-4231-b3fd-06d0ce50b467","Type":"ContainerStarted","Data":"ca2dd5a401b635b162d5ba879475b7a225842de39753240a4b04770cb8cd562e"} Jan 28 18:51:20 crc kubenswrapper[4767]: I0128 18:51:20.290119 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-db-sync-5ptf6" event={"ID":"b956870a-eae0-48fb-8e4f-182a9f276308","Type":"ContainerDied","Data":"7814196a4da2add88b3faa267244a54e4877e24ac10f357f4ec3a1864e0f8e3e"} Jan 28 18:51:20 crc kubenswrapper[4767]: I0128 18:51:20.290189 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7814196a4da2add88b3faa267244a54e4877e24ac10f357f4ec3a1864e0f8e3e" Jan 28 18:51:20 crc kubenswrapper[4767]: I0128 18:51:20.290435 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-db-sync-5ptf6" Jan 28 18:51:21 crc kubenswrapper[4767]: I0128 18:51:21.301382 4767 generic.go:334] "Generic (PLEG): container finished" podID="f3a17316-3eca-4517-8254-94224f26cf8f" containerID="c6a690ed99d5f6b40c97054e865df89e3db1b412869542800d352c6d078f78e6" exitCode=0 Jan 28 18:51:21 crc kubenswrapper[4767]: I0128 18:51:21.301454 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f3a17316-3eca-4517-8254-94224f26cf8f","Type":"ContainerDied","Data":"c6a690ed99d5f6b40c97054e865df89e3db1b412869542800d352c6d078f78e6"} Jan 28 18:51:21 crc kubenswrapper[4767]: I0128 18:51:21.303409 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:21 crc kubenswrapper[4767]: I0128 18:51:21.303476 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-df7856cf4-bs48j" Jan 28 18:51:21 crc kubenswrapper[4767]: I0128 18:51:21.340119 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-df7856cf4-bs48j" podStartSLOduration=6.340088857 podStartE2EDuration="6.340088857s" podCreationTimestamp="2026-01-28 18:51:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:21.326257615 +0000 UTC m=+1287.290440509" watchObservedRunningTime="2026-01-28 18:51:21.340088857 +0000 UTC m=+1287.304271741" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.598353 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.613560 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 28 18:51:22 crc kubenswrapper[4767]: E0128 18:51:22.614012 4767 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="b956870a-eae0-48fb-8e4f-182a9f276308" containerName="heat-db-sync" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.614035 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b956870a-eae0-48fb-8e4f-182a9f276308" containerName="heat-db-sync" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.614227 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b956870a-eae0-48fb-8e4f-182a9f276308" containerName="heat-db-sync" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.614954 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.624693 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-openstack-config\") pod \"openstackclient\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.624926 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-openstack-config-secret\") pod \"openstackclient\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.625040 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.625112 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26r4f\" (UniqueName: \"kubernetes.io/projected/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-kube-api-access-26r4f\") pod \"openstackclient\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.633767 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.634138 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.634990 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-g8z42" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.684877 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.719574 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-4b5np"] Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.719889 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" podUID="3aee08af-b80f-4f86-a11c-29227f00fb5b" containerName="dnsmasq-dns" containerID="cri-o://df7372e3e19ffc60702cf139f694a349493503445b8ec8777123cbd66f4acbb2" gracePeriod=10 Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.728129 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-openstack-config\") pod \"openstackclient\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.728766 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-openstack-config-secret\") pod \"openstackclient\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.728831 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.728864 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26r4f\" (UniqueName: \"kubernetes.io/projected/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-kube-api-access-26r4f\") pod \"openstackclient\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.730073 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-openstack-config\") pod \"openstackclient\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.744019 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-openstack-config-secret\") pod \"openstackclient\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.751547 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-combined-ca-bundle\") pod \"openstackclient\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.759688 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26r4f\" (UniqueName: \"kubernetes.io/projected/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-kube-api-access-26r4f\") pod \"openstackclient\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " pod="openstack/openstackclient" Jan 28 18:51:22 crc kubenswrapper[4767]: I0128 18:51:22.946098 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient"
Jan 28 18:51:23 crc kubenswrapper[4767]: I0128 18:51:23.344112 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" podUID="3aee08af-b80f-4f86-a11c-29227f00fb5b" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.145:5353: connect: connection refused"
Jan 28 18:51:23 crc kubenswrapper[4767]: I0128 18:51:23.431068 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 28 18:51:23 crc kubenswrapper[4767]: W0128 18:51:23.442552 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7a0ada4b_d0dd_4984_8dd3_95a6814a45e1.slice/crio-a98e48dcb5cce19f1841daaa5b2c4cbe3a1b5fb60f236768023d370c1ac5e931 WatchSource:0}: Error finding container a98e48dcb5cce19f1841daaa5b2c4cbe3a1b5fb60f236768023d370c1ac5e931: Status 404 returned error can't find the container with id a98e48dcb5cce19f1841daaa5b2c4cbe3a1b5fb60f236768023d370c1ac5e931
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.237846 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5c9944b8f4-9qvg4" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.368264 4767 generic.go:334] "Generic (PLEG): container finished" podID="3aee08af-b80f-4f86-a11c-29227f00fb5b" containerID="df7372e3e19ffc60702cf139f694a349493503445b8ec8777123cbd66f4acbb2" exitCode=0
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.368363 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" event={"ID":"3aee08af-b80f-4f86-a11c-29227f00fb5b","Type":"ContainerDied","Data":"df7372e3e19ffc60702cf139f694a349493503445b8ec8777123cbd66f4acbb2"}
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.383547 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1","Type":"ContainerStarted","Data":"a98e48dcb5cce19f1841daaa5b2c4cbe3a1b5fb60f236768023d370c1ac5e931"}
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.404282 4767 generic.go:334] "Generic (PLEG): container finished" podID="f3a17316-3eca-4517-8254-94224f26cf8f" containerID="eb97552d14a30c1a1a8c79c52c695d9b6ade23c23938f659e96abc2f9dc96cdb" exitCode=0
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.404371 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f3a17316-3eca-4517-8254-94224f26cf8f","Type":"ContainerDied","Data":"eb97552d14a30c1a1a8c79c52c695d9b6ade23c23938f659e96abc2f9dc96cdb"}
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.668125 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np"
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.669239 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5c9944b8f4-9qvg4"
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.774911 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5c9944b8f4-9qvg4"
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.800298 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-dns-svc\") pod \"3aee08af-b80f-4f86-a11c-29227f00fb5b\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") "
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.800519 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-dns-swift-storage-0\") pod \"3aee08af-b80f-4f86-a11c-29227f00fb5b\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") "
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.800573 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-ovsdbserver-nb\") pod \"3aee08af-b80f-4f86-a11c-29227f00fb5b\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") "
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.800734 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bwcd8\" (UniqueName: \"kubernetes.io/projected/3aee08af-b80f-4f86-a11c-29227f00fb5b-kube-api-access-bwcd8\") pod \"3aee08af-b80f-4f86-a11c-29227f00fb5b\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") "
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.800962 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-ovsdbserver-sb\") pod \"3aee08af-b80f-4f86-a11c-29227f00fb5b\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") "
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.800993 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-config\") pod \"3aee08af-b80f-4f86-a11c-29227f00fb5b\" (UID: \"3aee08af-b80f-4f86-a11c-29227f00fb5b\") "
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.837901 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3aee08af-b80f-4f86-a11c-29227f00fb5b-kube-api-access-bwcd8" (OuterVolumeSpecName: "kube-api-access-bwcd8") pod "3aee08af-b80f-4f86-a11c-29227f00fb5b" (UID: "3aee08af-b80f-4f86-a11c-29227f00fb5b"). InnerVolumeSpecName "kube-api-access-bwcd8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.905314 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bwcd8\" (UniqueName: \"kubernetes.io/projected/3aee08af-b80f-4f86-a11c-29227f00fb5b-kube-api-access-bwcd8\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:24 crc kubenswrapper[4767]: I0128 18:51:24.947182 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3aee08af-b80f-4f86-a11c-29227f00fb5b" (UID: "3aee08af-b80f-4f86-a11c-29227f00fb5b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.008887 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3aee08af-b80f-4f86-a11c-29227f00fb5b" (UID: "3aee08af-b80f-4f86-a11c-29227f00fb5b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.010263 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.010311 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.025184 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3aee08af-b80f-4f86-a11c-29227f00fb5b" (UID: "3aee08af-b80f-4f86-a11c-29227f00fb5b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.034480 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-config" (OuterVolumeSpecName: "config") pod "3aee08af-b80f-4f86-a11c-29227f00fb5b" (UID: "3aee08af-b80f-4f86-a11c-29227f00fb5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.120517 4767 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.120563 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-config\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.129048 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3aee08af-b80f-4f86-a11c-29227f00fb5b" (UID: "3aee08af-b80f-4f86-a11c-29227f00fb5b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.192480 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.229224 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-config-data\") pod \"f3a17316-3eca-4517-8254-94224f26cf8f\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") "
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.229301 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-config-data-custom\") pod \"f3a17316-3eca-4517-8254-94224f26cf8f\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") "
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.231359 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3aee08af-b80f-4f86-a11c-29227f00fb5b-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.245359 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f3a17316-3eca-4517-8254-94224f26cf8f" (UID: "f3a17316-3eca-4517-8254-94224f26cf8f"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.332763 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-scripts\") pod \"f3a17316-3eca-4517-8254-94224f26cf8f\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") "
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.332907 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h2fsv\" (UniqueName: \"kubernetes.io/projected/f3a17316-3eca-4517-8254-94224f26cf8f-kube-api-access-h2fsv\") pod \"f3a17316-3eca-4517-8254-94224f26cf8f\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") "
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.332981 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-combined-ca-bundle\") pod \"f3a17316-3eca-4517-8254-94224f26cf8f\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") "
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.333055 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f3a17316-3eca-4517-8254-94224f26cf8f-etc-machine-id\") pod \"f3a17316-3eca-4517-8254-94224f26cf8f\" (UID: \"f3a17316-3eca-4517-8254-94224f26cf8f\") "
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.335485 4767 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.335572 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f3a17316-3eca-4517-8254-94224f26cf8f-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "f3a17316-3eca-4517-8254-94224f26cf8f" (UID: "f3a17316-3eca-4517-8254-94224f26cf8f"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.343467 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-scripts" (OuterVolumeSpecName: "scripts") pod "f3a17316-3eca-4517-8254-94224f26cf8f" (UID: "f3a17316-3eca-4517-8254-94224f26cf8f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.357009 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3a17316-3eca-4517-8254-94224f26cf8f-kube-api-access-h2fsv" (OuterVolumeSpecName: "kube-api-access-h2fsv") pod "f3a17316-3eca-4517-8254-94224f26cf8f" (UID: "f3a17316-3eca-4517-8254-94224f26cf8f"). InnerVolumeSpecName "kube-api-access-h2fsv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.443803 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.444579 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h2fsv\" (UniqueName: \"kubernetes.io/projected/f3a17316-3eca-4517-8254-94224f26cf8f-kube-api-access-h2fsv\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.444598 4767 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/f3a17316-3eca-4517-8254-94224f26cf8f-etc-machine-id\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.462577 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np" event={"ID":"3aee08af-b80f-4f86-a11c-29227f00fb5b","Type":"ContainerDied","Data":"bbfd6416c03ad7ffde3850da1aaffe0b2b6603e32d610e74d4d910add38f452a"}
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.462648 4767 scope.go:117] "RemoveContainer" containerID="df7372e3e19ffc60702cf139f694a349493503445b8ec8777123cbd66f4acbb2"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.462813 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-56df8fb6b7-4b5np"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.471793 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f3a17316-3eca-4517-8254-94224f26cf8f" (UID: "f3a17316-3eca-4517-8254-94224f26cf8f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.488105 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-config-data" (OuterVolumeSpecName: "config-data") pod "f3a17316-3eca-4517-8254-94224f26cf8f" (UID: "f3a17316-3eca-4517-8254-94224f26cf8f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.491695 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"f3a17316-3eca-4517-8254-94224f26cf8f","Type":"ContainerDied","Data":"af05ebacd723dbf074ff88870af2fc051cea01fd76147056a4013000f1b69c31"}
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.491828 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.496186 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09ee5dc4-06f4-4231-b3fd-06d0ce50b467","Type":"ContainerStarted","Data":"b977c05fb98503d8bf37aca91ef7a3c6ba4f43590cb7c954d8333d4b458cdcf4"}
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.551407 4767 scope.go:117] "RemoveContainer" containerID="85aa7fe70cc5d6b23bcd3f1a605e3043a91213fe327bf2679cd7f6db839009ea"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.565709 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.565756 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f3a17316-3eca-4517-8254-94224f26cf8f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.573560 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-4b5np"]
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.615263 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-56df8fb6b7-4b5np"]
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.651047 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.656380 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.701781 4767 scope.go:117] "RemoveContainer" containerID="c6a690ed99d5f6b40c97054e865df89e3db1b412869542800d352c6d078f78e6"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.732419 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 28 18:51:25 crc kubenswrapper[4767]: E0128 18:51:25.733926 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3aee08af-b80f-4f86-a11c-29227f00fb5b" containerName="dnsmasq-dns"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.733963 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="3aee08af-b80f-4f86-a11c-29227f00fb5b" containerName="dnsmasq-dns"
Jan 28 18:51:25 crc kubenswrapper[4767]: E0128 18:51:25.734003 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3a17316-3eca-4517-8254-94224f26cf8f" containerName="probe"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.734014 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3a17316-3eca-4517-8254-94224f26cf8f" containerName="probe"
Jan 28 18:51:25 crc kubenswrapper[4767]: E0128 18:51:25.734039 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3aee08af-b80f-4f86-a11c-29227f00fb5b" containerName="init"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.734048 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="3aee08af-b80f-4f86-a11c-29227f00fb5b" containerName="init"
Jan 28 18:51:25 crc kubenswrapper[4767]: E0128 18:51:25.734066 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3a17316-3eca-4517-8254-94224f26cf8f" containerName="cinder-scheduler"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.734075 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3a17316-3eca-4517-8254-94224f26cf8f" containerName="cinder-scheduler"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.738855 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="3aee08af-b80f-4f86-a11c-29227f00fb5b" containerName="dnsmasq-dns"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.738911 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3a17316-3eca-4517-8254-94224f26cf8f" containerName="probe"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.738925 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3a17316-3eca-4517-8254-94224f26cf8f" containerName="cinder-scheduler"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.741946 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.747634 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.751686 4767 scope.go:117] "RemoveContainer" containerID="eb97552d14a30c1a1a8c79c52c695d9b6ade23c23938f659e96abc2f9dc96cdb"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.827122 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/df31a172-a9c6-46bc-a327-03ef85482e5c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.827241 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5m8vp\" (UniqueName: \"kubernetes.io/projected/df31a172-a9c6-46bc-a327-03ef85482e5c-kube-api-access-5m8vp\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.827341 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df31a172-a9c6-46bc-a327-03ef85482e5c-config-data\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.827397 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df31a172-a9c6-46bc-a327-03ef85482e5c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.827437 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df31a172-a9c6-46bc-a327-03ef85482e5c-scripts\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.828727 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.829424 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/df31a172-a9c6-46bc-a327-03ef85482e5c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.931172 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/df31a172-a9c6-46bc-a327-03ef85482e5c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.931331 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5m8vp\" (UniqueName: \"kubernetes.io/projected/df31a172-a9c6-46bc-a327-03ef85482e5c-kube-api-access-5m8vp\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.931365 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df31a172-a9c6-46bc-a327-03ef85482e5c-config-data\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.932232 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df31a172-a9c6-46bc-a327-03ef85482e5c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.932305 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df31a172-a9c6-46bc-a327-03ef85482e5c-scripts\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.932323 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/df31a172-a9c6-46bc-a327-03ef85482e5c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.936564 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/df31a172-a9c6-46bc-a327-03ef85482e5c-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.946945 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/df31a172-a9c6-46bc-a327-03ef85482e5c-scripts\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.947275 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/df31a172-a9c6-46bc-a327-03ef85482e5c-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.949468 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/df31a172-a9c6-46bc-a327-03ef85482e5c-config-data\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.957808 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5m8vp\" (UniqueName: \"kubernetes.io/projected/df31a172-a9c6-46bc-a327-03ef85482e5c-kube-api-access-5m8vp\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:25 crc kubenswrapper[4767]: I0128 18:51:25.964380 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df31a172-a9c6-46bc-a327-03ef85482e5c-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"df31a172-a9c6-46bc-a327-03ef85482e5c\") " pod="openstack/cinder-scheduler-0"
Jan 28 18:51:26 crc kubenswrapper[4767]: I0128 18:51:26.189863 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 28 18:51:26 crc kubenswrapper[4767]: I0128 18:51:26.538510 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09ee5dc4-06f4-4231-b3fd-06d0ce50b467","Type":"ContainerStarted","Data":"1ab423be294bd4d7374e2ce78b0b7291b960a2bab826bc8b811156e80d4f1ecf"}
Jan 28 18:51:26 crc kubenswrapper[4767]: I0128 18:51:26.828412 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3aee08af-b80f-4f86-a11c-29227f00fb5b" path="/var/lib/kubelet/pods/3aee08af-b80f-4f86-a11c-29227f00fb5b/volumes"
Jan 28 18:51:26 crc kubenswrapper[4767]: I0128 18:51:26.829535 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f3a17316-3eca-4517-8254-94224f26cf8f" path="/var/lib/kubelet/pods/f3a17316-3eca-4517-8254-94224f26cf8f/volumes"
Jan 28 18:51:26 crc kubenswrapper[4767]: I0128 18:51:26.907921 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 28 18:51:27 crc kubenswrapper[4767]: I0128 18:51:27.626185 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09ee5dc4-06f4-4231-b3fd-06d0ce50b467","Type":"ContainerStarted","Data":"beb92e8703c24bd489e19f6ef23aed43212e92b01a834151fcb0ef6d8c468b77"}
Jan 28 18:51:27 crc kubenswrapper[4767]: I0128 18:51:27.634673 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"df31a172-a9c6-46bc-a327-03ef85482e5c","Type":"ContainerStarted","Data":"d3b8504f50bae63bc659f20060a025166c398f9822f7e5be3c6c688637cf43c9"}
Jan 28 18:51:28 crc kubenswrapper[4767]: I0128 18:51:28.655747 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"df31a172-a9c6-46bc-a327-03ef85482e5c","Type":"ContainerStarted","Data":"43f2c9aae0a703c93d7ff10c0505ebf56dc474d449b072029a57e51be19cfff4"}
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.061191 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-df7856cf4-bs48j"
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.390526 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-df7856cf4-bs48j"
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.503656 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="f6eb637e-af35-4164-bf33-abd4c7049906" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.160:8776/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.508388 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5c9944b8f4-9qvg4"]
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.508684 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5c9944b8f4-9qvg4" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api-log" containerID="cri-o://54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792" gracePeriod=30
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.509241 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5c9944b8f4-9qvg4" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api" containerID="cri-o://190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10" gracePeriod=30
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.530837 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5c9944b8f4-9qvg4" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": EOF"
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.531110 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5c9944b8f4-9qvg4" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": EOF"
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.762833 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"df31a172-a9c6-46bc-a327-03ef85482e5c","Type":"ContainerStarted","Data":"21055553625c628076af9869ff0be00312fca2740f5e5e7767f479b20e85be62"}
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.798870 4767 generic.go:334] "Generic (PLEG): container finished" podID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerID="54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792" exitCode=143
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.799102 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c9944b8f4-9qvg4" event={"ID":"2dd67540-b613-4585-b1f8-8a4ba27c3d93","Type":"ContainerDied","Data":"54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792"}
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.799351 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=4.799323552 podStartE2EDuration="4.799323552s" podCreationTimestamp="2026-01-28 18:51:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:29.79892799 +0000 UTC m=+1295.763110884" watchObservedRunningTime="2026-01-28 18:51:29.799323552 +0000 UTC m=+1295.763506426"
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.811343 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09ee5dc4-06f4-4231-b3fd-06d0ce50b467","Type":"ContainerStarted","Data":"f48e79b4c905fb5ecfaca9b54ee55f83ea0465e3a117f917c9c9d9315d025ec8"}
Jan 28 18:51:29 crc kubenswrapper[4767]: I0128 18:51:29.811473 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 28 18:51:31 crc kubenswrapper[4767]: I0128 18:51:31.190164 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Jan 28 18:51:32 crc kubenswrapper[4767]: I0128 18:51:32.375438 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Jan 28 18:51:32 crc kubenswrapper[4767]: I0128 18:51:32.427553 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=4.837977045 podStartE2EDuration="14.427503348s" podCreationTimestamp="2026-01-28 18:51:18 +0000 UTC" firstStartedPulling="2026-01-28 18:51:19.379997935 +0000 UTC m=+1285.344180819" lastFinishedPulling="2026-01-28 18:51:28.969524248 +0000 UTC m=+1294.933707122" observedRunningTime="2026-01-28 18:51:29.862699012 +0000 UTC m=+1295.826881896" watchObservedRunningTime="2026-01-28 18:51:32.427503348 +0000 UTC m=+1298.391686222"
Jan 28 18:51:32 crc kubenswrapper[4767]: I0128 18:51:32.977884 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-59d5689489-z5swl"]
Jan 28 18:51:32 crc kubenswrapper[4767]: I0128 18:51:32.980017 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:32 crc kubenswrapper[4767]: I0128 18:51:32.983739 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-engine-config-data"
Jan 28 18:51:32 crc kubenswrapper[4767]: I0128 18:51:32.986298 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-config-data"
Jan 28 18:51:32 crc kubenswrapper[4767]: I0128 18:51:32.990749 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-heat-dockercfg-rvl9v"
Jan 28 18:51:32 crc kubenswrapper[4767]: I0128 18:51:32.993417 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-59d5689489-z5swl"]
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.046769 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jf2mf\" (UniqueName: \"kubernetes.io/projected/33e5dd4b-468d-4a50-9429-1faba885d020-kube-api-access-jf2mf\") pod \"heat-engine-59d5689489-z5swl\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.046849 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-config-data-custom\") pod \"heat-engine-59d5689489-z5swl\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.046931 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-combined-ca-bundle\") pod \"heat-engine-59d5689489-z5swl\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.046967 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-config-data\") pod \"heat-engine-59d5689489-z5swl\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.150877 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jf2mf\" (UniqueName: \"kubernetes.io/projected/33e5dd4b-468d-4a50-9429-1faba885d020-kube-api-access-jf2mf\") pod \"heat-engine-59d5689489-z5swl\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.280079 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-config-data-custom\") pod \"heat-engine-59d5689489-z5swl\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.280488 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-combined-ca-bundle\") pod \"heat-engine-59d5689489-z5swl\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.280615 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-config-data\") pod \"heat-engine-59d5689489-z5swl\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.297732 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jf2mf\" (UniqueName: \"kubernetes.io/projected/33e5dd4b-468d-4a50-9429-1faba885d020-kube-api-access-jf2mf\") pod \"heat-engine-59d5689489-z5swl\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.331679 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-config-data\") pod \"heat-engine-59d5689489-z5swl\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.331940 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-config-data-custom\") pod \"heat-engine-59d5689489-z5swl\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.332391 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-combined-ca-bundle\") pod \"heat-engine-59d5689489-z5swl\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.348148 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"]
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.382681 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.418039 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"]
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.457150 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-6cd865bf84-w9w7d"]
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.460152 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.466663 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-api-config-data"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.484880 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-ovsdbserver-nb\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.484969 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-dns-swift-storage-0\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.485040 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgwk4\" (UniqueName: \"kubernetes.io/projected/1a40cf4b-e422-4f81-a8b6-6f202a809438-kube-api-access-mgwk4\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.485103 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-dns-svc\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.485152 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-ovsdbserver-sb\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.485182 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-config\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.560350 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-6cd865bf84-w9w7d"]
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.569291 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-56cbd47b65-wxqrt"]
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.570742 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.574132 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"heat-cfnapi-config-data"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.589575 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-ovsdbserver-sb\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.589687 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wqrt\" (UniqueName: \"kubernetes.io/projected/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-kube-api-access-2wqrt\") pod \"heat-api-6cd865bf84-w9w7d\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.589734 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-config\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.589764 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-combined-ca-bundle\") pod \"heat-api-6cd865bf84-w9w7d\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.589841 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-ovsdbserver-nb\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.589875 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-dns-swift-storage-0\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.589971 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgwk4\" (UniqueName: \"kubernetes.io/projected/1a40cf4b-e422-4f81-a8b6-6f202a809438-kube-api-access-mgwk4\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.590014 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-config-data\") pod \"heat-api-6cd865bf84-w9w7d\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.590045 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-config-data-custom\") pod \"heat-api-6cd865bf84-w9w7d\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.590139 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-dns-svc\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.591300 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-dns-svc\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.591906 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-ovsdbserver-sb\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.592457 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-config\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.593395 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-dns-swift-storage-0\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.594299 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-ovsdbserver-nb\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.617459 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.637676 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-56cbd47b65-wxqrt"]
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.648358 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgwk4\" (UniqueName: \"kubernetes.io/projected/1a40cf4b-e422-4f81-a8b6-6f202a809438-kube-api-access-mgwk4\") pod \"dnsmasq-dns-f8cd4d6dc-bpnbm\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.702230 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-config-data\") pod \"heat-cfnapi-56cbd47b65-wxqrt\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") " pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.703102 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jns9\" (UniqueName: \"kubernetes.io/projected/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-kube-api-access-5jns9\") pod \"heat-cfnapi-56cbd47b65-wxqrt\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") " pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.703137 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-combined-ca-bundle\") pod \"heat-cfnapi-56cbd47b65-wxqrt\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") " pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.703305 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wqrt\" (UniqueName: \"kubernetes.io/projected/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-kube-api-access-2wqrt\") pod \"heat-api-6cd865bf84-w9w7d\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.703367 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-combined-ca-bundle\") pod \"heat-api-6cd865bf84-w9w7d\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.703394 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-config-data-custom\") pod \"heat-cfnapi-56cbd47b65-wxqrt\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") " pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.703713 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-config-data\") pod \"heat-api-6cd865bf84-w9w7d\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.703756 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-config-data-custom\") pod \"heat-api-6cd865bf84-w9w7d\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.719579 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-combined-ca-bundle\") pod \"heat-api-6cd865bf84-w9w7d\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.719842 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-config-data-custom\") pod \"heat-api-6cd865bf84-w9w7d\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.729793 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-config-data\") pod \"heat-api-6cd865bf84-w9w7d\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.751866 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wqrt\" (UniqueName: \"kubernetes.io/projected/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-kube-api-access-2wqrt\") pod \"heat-api-6cd865bf84-w9w7d\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.757771 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.800973 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6cd865bf84-w9w7d"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.810810 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-config-data\") pod \"heat-cfnapi-56cbd47b65-wxqrt\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") " pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.810951 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jns9\" (UniqueName: \"kubernetes.io/projected/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-kube-api-access-5jns9\") pod \"heat-cfnapi-56cbd47b65-wxqrt\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") " pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.810993 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-combined-ca-bundle\") pod \"heat-cfnapi-56cbd47b65-wxqrt\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") " pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.811104 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-config-data-custom\") pod \"heat-cfnapi-56cbd47b65-wxqrt\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") " pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.817562 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-config-data\") pod \"heat-cfnapi-56cbd47b65-wxqrt\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") " pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.834475 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-combined-ca-bundle\") pod \"heat-cfnapi-56cbd47b65-wxqrt\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") " pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.836115 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-config-data-custom\") pod \"heat-cfnapi-56cbd47b65-wxqrt\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") " pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.854453 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jns9\" (UniqueName: \"kubernetes.io/projected/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-kube-api-access-5jns9\") pod \"heat-cfnapi-56cbd47b65-wxqrt\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") " pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.929250 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.994539 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5c9944b8f4-9qvg4" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:54396->10.217.0.158:9311: read: connection reset by peer"
Jan 28 18:51:33 crc kubenswrapper[4767]: I0128 18:51:33.995044 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5c9944b8f4-9qvg4" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:54390->10.217.0.158:9311: read: connection reset by peer"
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.353949 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-59d5689489-z5swl"]
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.807317 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5c9944b8f4-9qvg4"
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.896852 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"]
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.903422 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-6cd865bf84-w9w7d"]
Jan 28 18:51:34 crc kubenswrapper[4767]: W0128 18:51:34.938247 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c05cf04_9e30_4aa2_b2ad_2aadc34a10c9.slice/crio-0ca537347dc8d09e34d38285fa3794920e9c138c5c49beec74f94c96c100d7b3 WatchSource:0}: Error finding container 0ca537347dc8d09e34d38285fa3794920e9c138c5c49beec74f94c96c100d7b3: Status 404 returned error can't find the container with id 0ca537347dc8d09e34d38285fa3794920e9c138c5c49beec74f94c96c100d7b3
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.947748 4767 generic.go:334] "Generic (PLEG): container finished" podID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerID="190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10" exitCode=0
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.947893 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5c9944b8f4-9qvg4"
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.948311 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c9944b8f4-9qvg4" event={"ID":"2dd67540-b613-4585-b1f8-8a4ba27c3d93","Type":"ContainerDied","Data":"190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10"}
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.948384 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5c9944b8f4-9qvg4" event={"ID":"2dd67540-b613-4585-b1f8-8a4ba27c3d93","Type":"ContainerDied","Data":"48a01ca9465fec6b3a35587997622238cfe9ae1c5f534806077914e394d931ec"}
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.948412 4767 scope.go:117] "RemoveContainer" containerID="190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10"
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.956901 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-combined-ca-bundle\") pod \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") "
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.957065 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2dd67540-b613-4585-b1f8-8a4ba27c3d93-logs\") pod \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") "
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.957117 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-config-data-custom\") pod \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") "
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.957184 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-config-data\") pod \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") "
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.957313 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fp8kt\" (UniqueName: \"kubernetes.io/projected/2dd67540-b613-4585-b1f8-8a4ba27c3d93-kube-api-access-fp8kt\") pod \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\" (UID: \"2dd67540-b613-4585-b1f8-8a4ba27c3d93\") "
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.957939 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2dd67540-b613-4585-b1f8-8a4ba27c3d93-logs" (OuterVolumeSpecName: "logs") pod "2dd67540-b613-4585-b1f8-8a4ba27c3d93" (UID: "2dd67540-b613-4585-b1f8-8a4ba27c3d93"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.960344 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2dd67540-b613-4585-b1f8-8a4ba27c3d93-logs\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.966244 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2dd67540-b613-4585-b1f8-8a4ba27c3d93" (UID: "2dd67540-b613-4585-b1f8-8a4ba27c3d93"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.970789 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-59d5689489-z5swl" event={"ID":"33e5dd4b-468d-4a50-9429-1faba885d020","Type":"ContainerStarted","Data":"56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4"}
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.970857 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-59d5689489-z5swl" event={"ID":"33e5dd4b-468d-4a50-9429-1faba885d020","Type":"ContainerStarted","Data":"25b9748b6f8dc26309146e40cd1ee4cc1b3eb8c7dfa36724dea162c8baf0d34f"}
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.972384 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-59d5689489-z5swl"
Jan 28 18:51:34 crc kubenswrapper[4767]: I0128 18:51:34.973118 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dd67540-b613-4585-b1f8-8a4ba27c3d93-kube-api-access-fp8kt" (OuterVolumeSpecName: "kube-api-access-fp8kt") pod "2dd67540-b613-4585-b1f8-8a4ba27c3d93" (UID: "2dd67540-b613-4585-b1f8-8a4ba27c3d93"). InnerVolumeSpecName "kube-api-access-fp8kt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.005405 4767 scope.go:117] "RemoveContainer" containerID="54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792"
Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.019115 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-56cbd47b65-wxqrt"]
Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.021448 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-59d5689489-z5swl" podStartSLOduration=3.021413304 podStartE2EDuration="3.021413304s" podCreationTimestamp="2026-01-28 18:51:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:35.001772211 +0000 UTC m=+1300.965955095" watchObservedRunningTime="2026-01-28 18:51:35.021413304 +0000 UTC m=+1300.985596178"
Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.026267 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2dd67540-b613-4585-b1f8-8a4ba27c3d93" (UID: "2dd67540-b613-4585-b1f8-8a4ba27c3d93"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.067999 4767 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.068175 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fp8kt\" (UniqueName: \"kubernetes.io/projected/2dd67540-b613-4585-b1f8-8a4ba27c3d93-kube-api-access-fp8kt\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.068226 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.090358 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-config-data" (OuterVolumeSpecName: "config-data") pod "2dd67540-b613-4585-b1f8-8a4ba27c3d93" (UID: "2dd67540-b613-4585-b1f8-8a4ba27c3d93"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.098796 4767 scope.go:117] "RemoveContainer" containerID="190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10"
Jan 28 18:51:35 crc kubenswrapper[4767]: E0128 18:51:35.105686 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10\": container with ID starting with 190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10 not found: ID does not exist" containerID="190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10"
Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.105771 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10"} err="failed to get container status \"190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10\": rpc error: code = NotFound desc = could not find container \"190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10\": container with ID starting with 190accacf294280a9d80dd709c082b1261ba24446271ed9abcaf6d960e8a7c10 not found: ID does not exist"
Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.105813 4767 scope.go:117] "RemoveContainer" containerID="54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792"
Jan 28 18:51:35 crc kubenswrapper[4767]: E0128 18:51:35.107418 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792\": container with ID starting with 54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792 not found: ID does not exist" containerID="54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792"
Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.107508 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792"} err="failed to get container status \"54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792\": rpc error: code = NotFound
desc = could not find container \"54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792\": container with ID starting with 54b765387110586caa57345cc511b38c78ccb025a34a0a8125bc485a5efc4792 not found: ID does not exist" Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.173607 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2dd67540-b613-4585-b1f8-8a4ba27c3d93-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.304248 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5c9944b8f4-9qvg4"] Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.312053 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5c9944b8f4-9qvg4"] Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.983472 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-66c6598f9f-9w6r9"] Jan 28 18:51:35 crc kubenswrapper[4767]: E0128 18:51:35.984512 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api-log" Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.984528 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api-log" Jan 28 18:51:35 crc kubenswrapper[4767]: E0128 18:51:35.984562 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api" Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.984572 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api" Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.984774 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api" Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.984787 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" containerName="barbican-api-log" Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.986088 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.992930 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.995337 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Jan 28 18:51:35 crc kubenswrapper[4767]: I0128 18:51:35.995709 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.002938 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-56cbd47b65-wxqrt" event={"ID":"f385fd5b-9b44-4d64-b9a5-39ffddab1c34","Type":"ContainerStarted","Data":"5e91d3c559a8390fc0995ab85fb2831f2eb137bdb464a473fafe40a1bf2a4fd4"} Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.008784 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-66c6598f9f-9w6r9"] Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.013616 4767 generic.go:334] "Generic (PLEG): container finished" podID="1a40cf4b-e422-4f81-a8b6-6f202a809438" containerID="35f1c8c3820593ab7a98b6298a1097ecd4f1633f28326e42421804ca1ada8c75" exitCode=0 Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.014535 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm" event={"ID":"1a40cf4b-e422-4f81-a8b6-6f202a809438","Type":"ContainerDied","Data":"35f1c8c3820593ab7a98b6298a1097ecd4f1633f28326e42421804ca1ada8c75"} Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.014592 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm" event={"ID":"1a40cf4b-e422-4f81-a8b6-6f202a809438","Type":"ContainerStarted","Data":"21b8627b30476179d57336ea20473869f2a287682f43a3bd1ad2578e7af5efb0"} Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.025606 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6cd865bf84-w9w7d" event={"ID":"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9","Type":"ContainerStarted","Data":"0ca537347dc8d09e34d38285fa3794920e9c138c5c49beec74f94c96c100d7b3"} Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.099067 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-etc-swift\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.099383 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shf4q\" (UniqueName: \"kubernetes.io/projected/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-kube-api-access-shf4q\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.099428 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-public-tls-certs\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.099493 4767 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-internal-tls-certs\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.099531 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-config-data\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.099608 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-log-httpd\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.099639 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-combined-ca-bundle\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.099738 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-run-httpd\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.202170 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-run-httpd\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.202278 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-etc-swift\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.202393 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shf4q\" (UniqueName: \"kubernetes.io/projected/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-kube-api-access-shf4q\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.202420 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-public-tls-certs\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 
18:51:36.202456 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-internal-tls-certs\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.202489 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-config-data\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.202537 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-log-httpd\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.202570 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-combined-ca-bundle\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.203640 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-run-httpd\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.208252 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-log-httpd\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.211957 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-public-tls-certs\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.212023 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-combined-ca-bundle\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.217042 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-internal-tls-certs\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.222393 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-config-data\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.222607 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-etc-swift\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.242176 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shf4q\" (UniqueName: \"kubernetes.io/projected/85ceb5d8-a7fe-4e66-a20f-6a309942c1fc-kube-api-access-shf4q\") pod \"swift-proxy-66c6598f9f-9w6r9\" (UID: \"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc\") " pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.321617 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.575525 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.832653 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dd67540-b613-4585-b1f8-8a4ba27c3d93" path="/var/lib/kubelet/pods/2dd67540-b613-4585-b1f8-8a4ba27c3d93/volumes" Jan 28 18:51:36 crc kubenswrapper[4767]: I0128 18:51:36.999018 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-66c6598f9f-9w6r9"] Jan 28 18:51:37 crc kubenswrapper[4767]: I0128 18:51:37.044686 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm" event={"ID":"1a40cf4b-e422-4f81-a8b6-6f202a809438","Type":"ContainerStarted","Data":"8456cdd09ab332083994f954e13921f2ca1748bf6b9b10ac39da74ce7992b8f4"} Jan 28 18:51:37 crc kubenswrapper[4767]: I0128 18:51:37.044793 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm" Jan 28 18:51:37 crc kubenswrapper[4767]: I0128 18:51:37.079579 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm" podStartSLOduration=4.079539288 podStartE2EDuration="4.079539288s" podCreationTimestamp="2026-01-28 18:51:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:37.069923957 +0000 UTC m=+1303.034106841" watchObservedRunningTime="2026-01-28 18:51:37.079539288 +0000 UTC m=+1303.043722162" Jan 28 18:51:39 crc kubenswrapper[4767]: I0128 18:51:39.696326 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:39 crc kubenswrapper[4767]: I0128 18:51:39.697720 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="ceilometer-central-agent" containerID="cri-o://b977c05fb98503d8bf37aca91ef7a3c6ba4f43590cb7c954d8333d4b458cdcf4" gracePeriod=30 Jan 28 18:51:39 crc kubenswrapper[4767]: I0128 18:51:39.700522 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" 
containerName="proxy-httpd" containerID="cri-o://f48e79b4c905fb5ecfaca9b54ee55f83ea0465e3a117f917c9c9d9315d025ec8" gracePeriod=30 Jan 28 18:51:39 crc kubenswrapper[4767]: I0128 18:51:39.700780 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="ceilometer-notification-agent" containerID="cri-o://1ab423be294bd4d7374e2ce78b0b7291b960a2bab826bc8b811156e80d4f1ecf" gracePeriod=30 Jan 28 18:51:39 crc kubenswrapper[4767]: I0128 18:51:39.705543 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="sg-core" containerID="cri-o://beb92e8703c24bd489e19f6ef23aed43212e92b01a834151fcb0ef6d8c468b77" gracePeriod=30 Jan 28 18:51:39 crc kubenswrapper[4767]: I0128 18:51:39.719571 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.162:3000/\": EOF" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.091848 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-engine-75f5487b88-96qvr"] Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.093520 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.120826 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-75f5487b88-96qvr"] Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.134434 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-757c589dd9-zvtp6"] Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.135999 4767 generic.go:334] "Generic (PLEG): container finished" podID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerID="f48e79b4c905fb5ecfaca9b54ee55f83ea0465e3a117f917c9c9d9315d025ec8" exitCode=0 Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.136058 4767 generic.go:334] "Generic (PLEG): container finished" podID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerID="beb92e8703c24bd489e19f6ef23aed43212e92b01a834151fcb0ef6d8c468b77" exitCode=2 Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.161404 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-dc6d99b6-mtv24"] Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.163373 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.166879 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-dc6d99b6-mtv24"] Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.166984 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09ee5dc4-06f4-4231-b3fd-06d0ce50b467","Type":"ContainerDied","Data":"f48e79b4c905fb5ecfaca9b54ee55f83ea0465e3a117f917c9c9d9315d025ec8"} Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.167092 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09ee5dc4-06f4-4231-b3fd-06d0ce50b467","Type":"ContainerDied","Data":"beb92e8703c24bd489e19f6ef23aed43212e92b01a834151fcb0ef6d8c468b77"} Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.167289 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.191556 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-757c589dd9-zvtp6"] Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.214042 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8flx\" (UniqueName: \"kubernetes.io/projected/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-kube-api-access-h8flx\") pod \"heat-cfnapi-dc6d99b6-mtv24\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") " pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.214099 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/79f79643-bd94-43a4-9be8-98513b220314-config-data-custom\") pod \"heat-engine-75f5487b88-96qvr\" (UID: \"79f79643-bd94-43a4-9be8-98513b220314\") " pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.214156 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-config-data-custom\") pod \"heat-api-757c589dd9-zvtp6\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") " pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.214229 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-config-data\") pod \"heat-api-757c589dd9-zvtp6\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") " pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.214252 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-config-data\") pod \"heat-cfnapi-dc6d99b6-mtv24\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") " pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.214325 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-combined-ca-bundle\") pod \"heat-cfnapi-dc6d99b6-mtv24\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") " pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.214388 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7x49d\" (UniqueName: \"kubernetes.io/projected/79f79643-bd94-43a4-9be8-98513b220314-kube-api-access-7x49d\") pod \"heat-engine-75f5487b88-96qvr\" (UID: \"79f79643-bd94-43a4-9be8-98513b220314\") " pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.214405 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mk294\" (UniqueName: \"kubernetes.io/projected/a77e5f05-fa4b-4899-acca-36efb0710320-kube-api-access-mk294\") pod \"heat-api-757c589dd9-zvtp6\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") " pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 
18:51:40.214438 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79f79643-bd94-43a4-9be8-98513b220314-config-data\") pod \"heat-engine-75f5487b88-96qvr\" (UID: \"79f79643-bd94-43a4-9be8-98513b220314\") " pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.214469 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-config-data-custom\") pod \"heat-cfnapi-dc6d99b6-mtv24\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") " pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.214496 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79f79643-bd94-43a4-9be8-98513b220314-combined-ca-bundle\") pod \"heat-engine-75f5487b88-96qvr\" (UID: \"79f79643-bd94-43a4-9be8-98513b220314\") " pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.214520 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-combined-ca-bundle\") pod \"heat-api-757c589dd9-zvtp6\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") " pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.321595 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-combined-ca-bundle\") pod \"heat-cfnapi-dc6d99b6-mtv24\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") " pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.321762 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7x49d\" (UniqueName: \"kubernetes.io/projected/79f79643-bd94-43a4-9be8-98513b220314-kube-api-access-7x49d\") pod \"heat-engine-75f5487b88-96qvr\" (UID: \"79f79643-bd94-43a4-9be8-98513b220314\") " pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.321791 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mk294\" (UniqueName: \"kubernetes.io/projected/a77e5f05-fa4b-4899-acca-36efb0710320-kube-api-access-mk294\") pod \"heat-api-757c589dd9-zvtp6\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") " pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.321844 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79f79643-bd94-43a4-9be8-98513b220314-config-data\") pod \"heat-engine-75f5487b88-96qvr\" (UID: \"79f79643-bd94-43a4-9be8-98513b220314\") " pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.321888 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-config-data-custom\") pod \"heat-cfnapi-dc6d99b6-mtv24\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") " pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 
18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.321924 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79f79643-bd94-43a4-9be8-98513b220314-combined-ca-bundle\") pod \"heat-engine-75f5487b88-96qvr\" (UID: \"79f79643-bd94-43a4-9be8-98513b220314\") " pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.321960 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-combined-ca-bundle\") pod \"heat-api-757c589dd9-zvtp6\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") " pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.322015 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8flx\" (UniqueName: \"kubernetes.io/projected/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-kube-api-access-h8flx\") pod \"heat-cfnapi-dc6d99b6-mtv24\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") " pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.322043 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/79f79643-bd94-43a4-9be8-98513b220314-config-data-custom\") pod \"heat-engine-75f5487b88-96qvr\" (UID: \"79f79643-bd94-43a4-9be8-98513b220314\") " pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.322122 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-config-data-custom\") pod \"heat-api-757c589dd9-zvtp6\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") " pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.322230 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-config-data\") pod \"heat-api-757c589dd9-zvtp6\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") " pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.322258 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-config-data\") pod \"heat-cfnapi-dc6d99b6-mtv24\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") " pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.343751 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-combined-ca-bundle\") pod \"heat-api-757c589dd9-zvtp6\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") " pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.350417 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-config-data\") pod \"heat-api-757c589dd9-zvtp6\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") " pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.353495 4767 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mk294\" (UniqueName: \"kubernetes.io/projected/a77e5f05-fa4b-4899-acca-36efb0710320-kube-api-access-mk294\") pod \"heat-api-757c589dd9-zvtp6\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") " pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.358793 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-combined-ca-bundle\") pod \"heat-cfnapi-dc6d99b6-mtv24\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") " pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.358932 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-config-data-custom\") pod \"heat-cfnapi-dc6d99b6-mtv24\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") " pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.358933 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/79f79643-bd94-43a4-9be8-98513b220314-combined-ca-bundle\") pod \"heat-engine-75f5487b88-96qvr\" (UID: \"79f79643-bd94-43a4-9be8-98513b220314\") " pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.359094 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7x49d\" (UniqueName: \"kubernetes.io/projected/79f79643-bd94-43a4-9be8-98513b220314-kube-api-access-7x49d\") pod \"heat-engine-75f5487b88-96qvr\" (UID: \"79f79643-bd94-43a4-9be8-98513b220314\") " pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.359524 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/79f79643-bd94-43a4-9be8-98513b220314-config-data\") pod \"heat-engine-75f5487b88-96qvr\" (UID: \"79f79643-bd94-43a4-9be8-98513b220314\") " pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.359575 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-config-data\") pod \"heat-cfnapi-dc6d99b6-mtv24\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") " pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.364260 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8flx\" (UniqueName: \"kubernetes.io/projected/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-kube-api-access-h8flx\") pod \"heat-cfnapi-dc6d99b6-mtv24\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") " pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.367398 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-config-data-custom\") pod \"heat-api-757c589dd9-zvtp6\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") " pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.372106 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/79f79643-bd94-43a4-9be8-98513b220314-config-data-custom\") pod \"heat-engine-75f5487b88-96qvr\" (UID: \"79f79643-bd94-43a4-9be8-98513b220314\") " pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.421067 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.492918 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:40 crc kubenswrapper[4767]: I0128 18:51:40.542048 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:41 crc kubenswrapper[4767]: I0128 18:51:41.157568 4767 generic.go:334] "Generic (PLEG): container finished" podID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerID="1ab423be294bd4d7374e2ce78b0b7291b960a2bab826bc8b811156e80d4f1ecf" exitCode=0 Jan 28 18:51:41 crc kubenswrapper[4767]: I0128 18:51:41.157621 4767 generic.go:334] "Generic (PLEG): container finished" podID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerID="b977c05fb98503d8bf37aca91ef7a3c6ba4f43590cb7c954d8333d4b458cdcf4" exitCode=0 Jan 28 18:51:41 crc kubenswrapper[4767]: I0128 18:51:41.157653 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09ee5dc4-06f4-4231-b3fd-06d0ce50b467","Type":"ContainerDied","Data":"1ab423be294bd4d7374e2ce78b0b7291b960a2bab826bc8b811156e80d4f1ecf"} Jan 28 18:51:41 crc kubenswrapper[4767]: I0128 18:51:41.157697 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09ee5dc4-06f4-4231-b3fd-06d0ce50b467","Type":"ContainerDied","Data":"b977c05fb98503d8bf37aca91ef7a3c6ba4f43590cb7c954d8333d4b458cdcf4"} Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.175412 4767 generic.go:334] "Generic (PLEG): container finished" podID="cbd7dd91-c84a-442f-86af-c3a06ca9a373" containerID="d46fa9f61d3a844beeae98cfaa9a563690b5a6876b170d769db6eec982b81858" exitCode=0 Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.175824 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-kdhmb" event={"ID":"cbd7dd91-c84a-442f-86af-c3a06ca9a373","Type":"ContainerDied","Data":"d46fa9f61d3a844beeae98cfaa9a563690b5a6876b170d769db6eec982b81858"} Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.770584 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-56cbd47b65-wxqrt"] Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.788363 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-6cd865bf84-w9w7d"] Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.818765 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-cfnapi-7fb965dcbf-xj95b"] Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.820156 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.825073 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-public-svc" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.825295 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-cfnapi-internal-svc" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.830591 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/heat-api-68db597dbc-btcng"] Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.832426 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.837107 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-public-svc" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.838464 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-heat-api-internal-svc" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.859653 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7fb965dcbf-xj95b"] Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.881651 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-68db597dbc-btcng"] Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.892402 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-config-data-custom\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.892457 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-internal-tls-certs\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.892526 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-combined-ca-bundle\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.892584 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-public-tls-certs\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.892605 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8987c\" (UniqueName: \"kubernetes.io/projected/1ace2f10-9cec-4091-a68f-7680d0f282fc-kube-api-access-8987c\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.892652 4767 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-public-tls-certs\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.892697 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-combined-ca-bundle\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.892736 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qhbk\" (UniqueName: \"kubernetes.io/projected/0b974209-d851-443b-88b4-868e5564e0fb-kube-api-access-8qhbk\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.892768 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-internal-tls-certs\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.892812 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-config-data\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.892849 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-config-data\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.892878 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-config-data-custom\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.995312 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-combined-ca-bundle\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.995393 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qhbk\" (UniqueName: \"kubernetes.io/projected/0b974209-d851-443b-88b4-868e5564e0fb-kube-api-access-8qhbk\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " 
pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.995433 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-internal-tls-certs\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.995484 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-config-data\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.995524 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-config-data\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.995555 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-config-data-custom\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.995635 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-config-data-custom\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.995661 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-internal-tls-certs\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.995717 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-combined-ca-bundle\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.995757 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-public-tls-certs\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:42 crc kubenswrapper[4767]: I0128 18:51:42.995788 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8987c\" (UniqueName: \"kubernetes.io/projected/1ace2f10-9cec-4091-a68f-7680d0f282fc-kube-api-access-8987c\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:42 
crc kubenswrapper[4767]: I0128 18:51:42.995836 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-public-tls-certs\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.005082 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-combined-ca-bundle\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.006833 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-internal-tls-certs\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.007227 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-public-tls-certs\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.007542 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-public-tls-certs\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.007763 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-internal-tls-certs\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.010712 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-config-data\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.012077 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-combined-ca-bundle\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.013148 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-config-data\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.015776 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0b974209-d851-443b-88b4-868e5564e0fb-config-data-custom\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.016115 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1ace2f10-9cec-4091-a68f-7680d0f282fc-config-data-custom\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.022812 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qhbk\" (UniqueName: \"kubernetes.io/projected/0b974209-d851-443b-88b4-868e5564e0fb-kube-api-access-8qhbk\") pod \"heat-cfnapi-7fb965dcbf-xj95b\" (UID: \"0b974209-d851-443b-88b4-868e5564e0fb\") " pod="openstack/heat-cfnapi-7fb965dcbf-xj95b"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.023590 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8987c\" (UniqueName: \"kubernetes.io/projected/1ace2f10-9cec-4091-a68f-7680d0f282fc-kube-api-access-8987c\") pod \"heat-api-68db597dbc-btcng\" (UID: \"1ace2f10-9cec-4091-a68f-7680d0f282fc\") " pod="openstack/heat-api-68db597dbc-btcng"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.168952 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-7fb965dcbf-xj95b"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.173860 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-68db597dbc-btcng"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.761528 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.891389 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v"]
Jan 28 18:51:43 crc kubenswrapper[4767]: I0128 18:51:43.891838 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" podUID="127f2e41-56a9-49d5-af60-577d5b3d7093" containerName="dnsmasq-dns" containerID="cri-o://50e5b7616f3f00ab5c90b6b604f9b08b8af456577f56689e6606244b7e8f5f0b" gracePeriod=10
Jan 28 18:51:44 crc kubenswrapper[4767]: I0128 18:51:44.243575 4767 generic.go:334] "Generic (PLEG): container finished" podID="127f2e41-56a9-49d5-af60-577d5b3d7093" containerID="50e5b7616f3f00ab5c90b6b604f9b08b8af456577f56689e6606244b7e8f5f0b" exitCode=0
Jan 28 18:51:44 crc kubenswrapper[4767]: I0128 18:51:44.243755 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" event={"ID":"127f2e41-56a9-49d5-af60-577d5b3d7093","Type":"ContainerDied","Data":"50e5b7616f3f00ab5c90b6b604f9b08b8af456577f56689e6606244b7e8f5f0b"}
Jan 28 18:51:45 crc kubenswrapper[4767]: I0128 18:51:45.163721 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-c9f87f88d-pq4n5"
Jan 28 18:51:45 crc kubenswrapper[4767]: I0128 18:51:45.169816 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-c9f87f88d-pq4n5"
Jan 28 18:51:45 crc kubenswrapper[4767]: W0128 18:51:45.327112 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod85ceb5d8_a7fe_4e66_a20f_6a309942c1fc.slice/crio-dd955ab0abf3f7425ea6d1a8723d7b82d004bfa985261cfea6ed82299251f4ad WatchSource:0}: Error finding container dd955ab0abf3f7425ea6d1a8723d7b82d004bfa985261cfea6ed82299251f4ad: Status 404 returned error can't find the container with id dd955ab0abf3f7425ea6d1a8723d7b82d004bfa985261cfea6ed82299251f4ad
Jan 28 18:51:45 crc kubenswrapper[4767]: I0128 18:51:45.478188 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-kdhmb"
Jan 28 18:51:45 crc kubenswrapper[4767]: I0128 18:51:45.598930 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lk2wr\" (UniqueName: \"kubernetes.io/projected/cbd7dd91-c84a-442f-86af-c3a06ca9a373-kube-api-access-lk2wr\") pod \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\" (UID: \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\") "
Jan 28 18:51:45 crc kubenswrapper[4767]: I0128 18:51:45.599154 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/cbd7dd91-c84a-442f-86af-c3a06ca9a373-config\") pod \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\" (UID: \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\") "
Jan 28 18:51:45 crc kubenswrapper[4767]: I0128 18:51:45.599661 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbd7dd91-c84a-442f-86af-c3a06ca9a373-combined-ca-bundle\") pod \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\" (UID: \"cbd7dd91-c84a-442f-86af-c3a06ca9a373\") "
Jan 28 18:51:45 crc kubenswrapper[4767]: I0128 18:51:45.629178 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbd7dd91-c84a-442f-86af-c3a06ca9a373-kube-api-access-lk2wr" (OuterVolumeSpecName: "kube-api-access-lk2wr") pod "cbd7dd91-c84a-442f-86af-c3a06ca9a373" (UID: "cbd7dd91-c84a-442f-86af-c3a06ca9a373"). InnerVolumeSpecName "kube-api-access-lk2wr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:51:45 crc kubenswrapper[4767]: I0128 18:51:45.703253 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lk2wr\" (UniqueName: \"kubernetes.io/projected/cbd7dd91-c84a-442f-86af-c3a06ca9a373-kube-api-access-lk2wr\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:45 crc kubenswrapper[4767]: I0128 18:51:45.722312 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbd7dd91-c84a-442f-86af-c3a06ca9a373-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cbd7dd91-c84a-442f-86af-c3a06ca9a373" (UID: "cbd7dd91-c84a-442f-86af-c3a06ca9a373"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:51:45 crc kubenswrapper[4767]: I0128 18:51:45.743366 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbd7dd91-c84a-442f-86af-c3a06ca9a373-config" (OuterVolumeSpecName: "config") pod "cbd7dd91-c84a-442f-86af-c3a06ca9a373" (UID: "cbd7dd91-c84a-442f-86af-c3a06ca9a373"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
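[annotation] The DELETE-then-kill sequence above ("Killing container with a grace period" with gracePeriod=10, followed by "container finished" with exitCode=0) is the standard stop pattern: the runtime signals the container to exit and only force-kills it if the grace period runs out. A minimal illustrative Go sketch of that pattern follows; it is not the kubelet's code, and the sleep workload and grace value are stand-ins.

package main

import (
    "os/exec"
    "syscall"
    "time"
)

// stopWithGrace mirrors the SIGTERM-then-SIGKILL sequence implied by the
// gracePeriod=10 entries above: ask the process to exit, and only force-kill
// it if it is still running when the grace period expires.
func stopWithGrace(cmd *exec.Cmd, grace time.Duration) error {
    if err := cmd.Process.Signal(syscall.SIGTERM); err != nil {
        return err
    }
    done := make(chan error, 1)
    go func() { done <- cmd.Wait() }()
    select {
    case err := <-done:
        return err // exited on its own within the grace period
    case <-time.After(grace):
        return cmd.Process.Kill() // grace period expired: SIGKILL
    }
}

func main() {
    cmd := exec.Command("sleep", "60") // stand-in workload
    _ = cmd.Start()
    _ = stopWithGrace(cmd, 10*time.Second)
}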
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:45 crc kubenswrapper[4767]: I0128 18:51:45.807126 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/cbd7dd91-c84a-442f-86af-c3a06ca9a373-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:45 crc kubenswrapper[4767]: I0128 18:51:45.807177 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbd7dd91-c84a-442f-86af-c3a06ca9a373-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.299090 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" event={"ID":"127f2e41-56a9-49d5-af60-577d5b3d7093","Type":"ContainerDied","Data":"c1bc259d86054336aa2772a76a0d32472697438274764c30169cf6d97ee66d3b"} Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.299172 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c1bc259d86054336aa2772a76a0d32472697438274764c30169cf6d97ee66d3b" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.303659 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-kdhmb" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.303734 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-kdhmb" event={"ID":"cbd7dd91-c84a-442f-86af-c3a06ca9a373","Type":"ContainerDied","Data":"e4dd237191b1774a347be9e1d72e23ba7500a507a2ade51cc7444e7c430536f9"} Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.303786 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e4dd237191b1774a347be9e1d72e23ba7500a507a2ade51cc7444e7c430536f9" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.346992 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-66c6598f9f-9w6r9" event={"ID":"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc","Type":"ContainerStarted","Data":"dd955ab0abf3f7425ea6d1a8723d7b82d004bfa985261cfea6ed82299251f4ad"} Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.411192 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.534994 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-config\") pod \"127f2e41-56a9-49d5-af60-577d5b3d7093\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.535095 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-nb\") pod \"127f2e41-56a9-49d5-af60-577d5b3d7093\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.535196 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-sb\") pod \"127f2e41-56a9-49d5-af60-577d5b3d7093\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.535275 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-dns-swift-storage-0\") pod \"127f2e41-56a9-49d5-af60-577d5b3d7093\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.535371 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q7m4s\" (UniqueName: \"kubernetes.io/projected/127f2e41-56a9-49d5-af60-577d5b3d7093-kube-api-access-q7m4s\") pod \"127f2e41-56a9-49d5-af60-577d5b3d7093\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.535457 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-dns-svc\") pod \"127f2e41-56a9-49d5-af60-577d5b3d7093\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.554264 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/127f2e41-56a9-49d5-af60-577d5b3d7093-kube-api-access-q7m4s" (OuterVolumeSpecName: "kube-api-access-q7m4s") pod "127f2e41-56a9-49d5-af60-577d5b3d7093" (UID: "127f2e41-56a9-49d5-af60-577d5b3d7093"). InnerVolumeSpecName "kube-api-access-q7m4s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.639912 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q7m4s\" (UniqueName: \"kubernetes.io/projected/127f2e41-56a9-49d5-af60-577d5b3d7093-kube-api-access-q7m4s\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.640540 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "127f2e41-56a9-49d5-af60-577d5b3d7093" (UID: "127f2e41-56a9-49d5-af60-577d5b3d7093"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.667017 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-config" (OuterVolumeSpecName: "config") pod "127f2e41-56a9-49d5-af60-577d5b3d7093" (UID: "127f2e41-56a9-49d5-af60-577d5b3d7093"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.687822 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "127f2e41-56a9-49d5-af60-577d5b3d7093" (UID: "127f2e41-56a9-49d5-af60-577d5b3d7093"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.742556 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "127f2e41-56a9-49d5-af60-577d5b3d7093" (UID: "127f2e41-56a9-49d5-af60-577d5b3d7093"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.743183 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-nb\") pod \"127f2e41-56a9-49d5-af60-577d5b3d7093\" (UID: \"127f2e41-56a9-49d5-af60-577d5b3d7093\") " Jan 28 18:51:46 crc kubenswrapper[4767]: W0128 18:51:46.743340 4767 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/127f2e41-56a9-49d5-af60-577d5b3d7093/volumes/kubernetes.io~configmap/ovsdbserver-nb Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.743373 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "127f2e41-56a9-49d5-af60-577d5b3d7093" (UID: "127f2e41-56a9-49d5-af60-577d5b3d7093"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.745673 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.745716 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.745783 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.745795 4767 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.782717 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "127f2e41-56a9-49d5-af60-577d5b3d7093" (UID: "127f2e41-56a9-49d5-af60-577d5b3d7093"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.820836 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-d9w94"] Jan 28 18:51:46 crc kubenswrapper[4767]: E0128 18:51:46.821537 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="127f2e41-56a9-49d5-af60-577d5b3d7093" containerName="dnsmasq-dns" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.821563 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="127f2e41-56a9-49d5-af60-577d5b3d7093" containerName="dnsmasq-dns" Jan 28 18:51:46 crc kubenswrapper[4767]: E0128 18:51:46.821595 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="127f2e41-56a9-49d5-af60-577d5b3d7093" containerName="init" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.821602 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="127f2e41-56a9-49d5-af60-577d5b3d7093" containerName="init" Jan 28 18:51:46 crc kubenswrapper[4767]: E0128 18:51:46.821619 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbd7dd91-c84a-442f-86af-c3a06ca9a373" containerName="neutron-db-sync" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.821626 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbd7dd91-c84a-442f-86af-c3a06ca9a373" containerName="neutron-db-sync" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.821907 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="127f2e41-56a9-49d5-af60-577d5b3d7093" containerName="dnsmasq-dns" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.821965 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbd7dd91-c84a-442f-86af-c3a06ca9a373" containerName="neutron-db-sync" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.823427 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.850021 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/127f2e41-56a9-49d5-af60-577d5b3d7093-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.856300 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-d9w94"] Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.889382 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6b85bcdd44-j7nc5"] Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.891726 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.899088 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-xl44z" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.899515 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.899707 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.899939 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.953255 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-ovsdbserver-sb\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.953334 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-ovsdbserver-nb\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.953362 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rlnk\" (UniqueName: \"kubernetes.io/projected/530c6c66-df17-4d54-b254-b0bf9e860545-kube-api-access-6rlnk\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.953461 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-dns-svc\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.953923 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-config\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:46 crc kubenswrapper[4767]: 
I0128 18:51:46.954001 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-dns-swift-storage-0\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:46 crc kubenswrapper[4767]: I0128 18:51:46.954419 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6b85bcdd44-j7nc5"] Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.056090 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-ovsdbserver-sb\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.056157 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-ovsdbserver-nb\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.056183 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rlnk\" (UniqueName: \"kubernetes.io/projected/530c6c66-df17-4d54-b254-b0bf9e860545-kube-api-access-6rlnk\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.056223 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-dns-svc\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.056271 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-ovndb-tls-certs\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.056300 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-config\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.056330 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-combined-ca-bundle\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.056362 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8bdn\" (UniqueName: 
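[annotation] The cpu_manager and memory_manager "RemoveStaleState" entries above record the node-level resource managers dropping per-container assignments left behind by pods that no longer exist (here the old dnsmasq-dns and neutron-db-sync pods). A simplified Go sketch of that cleanup; the key type and CPU lists are invented for illustration and are not the kubelet's state_mem structures.

package main

import "fmt"

// key identifies a container's resource assignment, matching the
// podUID/containerName pairs logged above.
type key struct{ podUID, container string }

// removeStaleState drops assignments whose pod is no longer active,
// the same reconciliation the RemoveStaleState entries describe.
func removeStaleState(assignments map[key][]int, active map[string]bool) {
    for k := range assignments {
        if !active[k.podUID] {
            fmt.Printf("removing stale assignment %s/%s\n", k.podUID, k.container)
            delete(assignments, k)
        }
    }
}

func main() {
    assignments := map[key][]int{
        {"127f2e41", "dnsmasq-dns"}: {0, 1}, // pod already deleted
        {"530c6c66", "dnsmasq-dns"}: {2, 3}, // replacement pod, still active
    }
    removeStaleState(assignments, map[string]bool{"530c6c66": true})
}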
\"kubernetes.io/projected/bb57e040-6449-4416-87a1-776751f75752-kube-api-access-h8bdn\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.056409 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-httpd-config\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.056495 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-config\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.056534 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-dns-swift-storage-0\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.057383 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-ovsdbserver-sb\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.057576 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-dns-swift-storage-0\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.058018 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-ovsdbserver-nb\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.059073 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-dns-svc\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.061006 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-config\") pod \"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.090009 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rlnk\" (UniqueName: \"kubernetes.io/projected/530c6c66-df17-4d54-b254-b0bf9e860545-kube-api-access-6rlnk\") pod 
\"dnsmasq-dns-688b9f5b49-d9w94\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") " pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.158808 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-ovndb-tls-certs\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.158871 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-config\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.158901 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-combined-ca-bundle\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.158939 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8bdn\" (UniqueName: \"kubernetes.io/projected/bb57e040-6449-4416-87a1-776751f75752-kube-api-access-h8bdn\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.158988 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-httpd-config\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.165964 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-combined-ca-bundle\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.166338 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-config\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.166933 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-ovndb-tls-certs\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.168859 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-httpd-config\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 
18:51:47.180114 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.185645 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8bdn\" (UniqueName: \"kubernetes.io/projected/bb57e040-6449-4416-87a1-776751f75752-kube-api-access-h8bdn\") pod \"neutron-6b85bcdd44-j7nc5\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") " pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.247241 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.363494 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v" Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.399096 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v"] Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.410289 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5cc8b5d5c5-x7h6v"] Jan 28 18:51:47 crc kubenswrapper[4767]: E0128 18:51:47.572498 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-heat-api:current-podified" Jan 28 18:51:47 crc kubenswrapper[4767]: E0128 18:51:47.572852 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:heat-api,Image:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_httpd_setup && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n5bhbch78hd7h5ch68ch8chd8h578h64ch564h5bh68chfch649hd6h688h589h548h9h54bh689h5dfh54dh544h5b5hf8h6hdch5bbh678h657q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:heat-api-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-custom,ReadOnly:true,MountPath:/etc/heat/heat.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2wqrt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthcheck,Port:{0 8004 
Jan 28 18:51:47 crc kubenswrapper[4767]: E0128 18:51:47.574357 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/heat-api-6cd865bf84-w9w7d" podUID="1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9"
Jan 28 18:51:47 crc kubenswrapper[4767]: I0128 18:51:47.848288 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.000910 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d2gn\" (UniqueName: \"kubernetes.io/projected/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-kube-api-access-2d2gn\") pod \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") "
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.000980 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-run-httpd\") pod \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") "
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.001045 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-config-data\") pod \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") "
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.001079 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-combined-ca-bundle\") pod \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") "
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.001129 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-sg-core-conf-yaml\") pod \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") "
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.001178 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-log-httpd\") pod \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") "
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.001320 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-scripts\") pod \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\" (UID: \"09ee5dc4-06f4-4231-b3fd-06d0ce50b467\") "
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.006016 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "09ee5dc4-06f4-4231-b3fd-06d0ce50b467" (UID: "09ee5dc4-06f4-4231-b3fd-06d0ce50b467"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.007523 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "09ee5dc4-06f4-4231-b3fd-06d0ce50b467" (UID: "09ee5dc4-06f4-4231-b3fd-06d0ce50b467"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.011507 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-kube-api-access-2d2gn" (OuterVolumeSpecName: "kube-api-access-2d2gn") pod "09ee5dc4-06f4-4231-b3fd-06d0ce50b467" (UID: "09ee5dc4-06f4-4231-b3fd-06d0ce50b467"). InnerVolumeSpecName "kube-api-access-2d2gn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.016810 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-scripts" (OuterVolumeSpecName: "scripts") pod "09ee5dc4-06f4-4231-b3fd-06d0ce50b467" (UID: "09ee5dc4-06f4-4231-b3fd-06d0ce50b467"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.110541 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d2gn\" (UniqueName: \"kubernetes.io/projected/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-kube-api-access-2d2gn\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.110579 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.110591 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.110602 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.141557 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "09ee5dc4-06f4-4231-b3fd-06d0ce50b467" (UID: "09ee5dc4-06f4-4231-b3fd-06d0ce50b467"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.220737 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.292421 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09ee5dc4-06f4-4231-b3fd-06d0ce50b467" (UID: "09ee5dc4-06f4-4231-b3fd-06d0ce50b467"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.323571 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.328676 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-engine-75f5487b88-96qvr"]
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.332401 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-config-data" (OuterVolumeSpecName: "config-data") pod "09ee5dc4-06f4-4231-b3fd-06d0ce50b467" (UID: "09ee5dc4-06f4-4231-b3fd-06d0ce50b467"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.397084 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.397096 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"09ee5dc4-06f4-4231-b3fd-06d0ce50b467","Type":"ContainerDied","Data":"ca2dd5a401b635b162d5ba879475b7a225842de39753240a4b04770cb8cd562e"} Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.397247 4767 scope.go:117] "RemoveContainer" containerID="f48e79b4c905fb5ecfaca9b54ee55f83ea0465e3a117f917c9c9d9315d025ec8" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.432615 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09ee5dc4-06f4-4231-b3fd-06d0ce50b467-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.566009 4767 scope.go:117] "RemoveContainer" containerID="beb92e8703c24bd489e19f6ef23aed43212e92b01a834151fcb0ef6d8c468b77" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.622607 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.647276 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.667757 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:48 crc kubenswrapper[4767]: E0128 18:51:48.669063 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="sg-core" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.669174 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="sg-core" Jan 28 18:51:48 crc kubenswrapper[4767]: E0128 18:51:48.669297 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="proxy-httpd" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.669387 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="proxy-httpd" Jan 28 18:51:48 crc kubenswrapper[4767]: E0128 18:51:48.669493 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="ceilometer-notification-agent" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.669582 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="ceilometer-notification-agent" Jan 28 18:51:48 crc kubenswrapper[4767]: E0128 18:51:48.669947 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="ceilometer-central-agent" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.670014 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="ceilometer-central-agent" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.670631 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="proxy-httpd" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.670774 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="ceilometer-central-agent" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.670841 4767 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="sg-core" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.670903 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" containerName="ceilometer-notification-agent" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.673562 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.682939 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.688923 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.752280 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-run-httpd\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.752358 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.752472 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6tgm\" (UniqueName: \"kubernetes.io/projected/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-kube-api-access-l6tgm\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.752580 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-log-httpd\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.752859 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.753044 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-config-data\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.753158 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-scripts\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.768528 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 
18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.829568 4767 scope.go:117] "RemoveContainer" containerID="1ab423be294bd4d7374e2ce78b0b7291b960a2bab826bc8b811156e80d4f1ecf" Jan 28 18:51:48 crc kubenswrapper[4767]: E0128 18:51:48.865390 4767 kuberuntime_gc.go:389] "Failed to remove container log dead symlink" err="remove /var/log/containers/ceilometer-0_openstack_ceilometer-notification-agent-1ab423be294bd4d7374e2ce78b0b7291b960a2bab826bc8b811156e80d4f1ecf.log: no such file or directory" path="/var/log/containers/ceilometer-0_openstack_ceilometer-notification-agent-1ab423be294bd4d7374e2ce78b0b7291b960a2bab826bc8b811156e80d4f1ecf.log" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.867986 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-scripts\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.868239 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-run-httpd\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.868290 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.868378 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6tgm\" (UniqueName: \"kubernetes.io/projected/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-kube-api-access-l6tgm\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.868462 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-log-httpd\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.868875 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.868978 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-config-data\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.869280 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ee5dc4-06f4-4231-b3fd-06d0ce50b467" path="/var/lib/kubelet/pods/09ee5dc4-06f4-4231-b3fd-06d0ce50b467/volumes" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.869876 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
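[annotation] Each "SyncLoop (PLEG): event for pod" entry above carries a pod ID, an event type such as ContainerStarted or ContainerDied, and a container ID in its Data field; the sync loop dispatches on the type to decide what work the pod needs next. A minimal Go sketch of that event shape and dispatch; the type and field names are invented for illustration and are not the kubelet's PLEG types.

package main

import "fmt"

// podLifecycleEvent mirrors the fields visible in the PLEG log lines:
// {"ID": podUID, "Type": ..., "Data": containerID}.
type podLifecycleEvent struct {
    ID, Type, Data string
}

func handle(ev podLifecycleEvent) {
    switch ev.Type {
    case "ContainerStarted":
        fmt.Printf("pod %s: container %s started, run probes\n", ev.ID, ev.Data)
    case "ContainerDied":
        fmt.Printf("pod %s: container %s died, sync pod state\n", ev.ID, ev.Data)
    default:
        fmt.Printf("pod %s: ignoring event %s\n", ev.ID, ev.Type)
    }
}

func main() {
    handle(podLifecycleEvent{"4df50f7c", "ContainerStarted", "abc123"})  // hypothetical IDs
    handle(podLifecycleEvent{"09ee5dc4", "ContainerDied", "ca2dd5a4"})
}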
\"kubernetes.io/empty-dir/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-run-httpd\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.876170 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-log-httpd\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.876413 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="127f2e41-56a9-49d5-af60-577d5b3d7093" path="/var/lib/kubelet/pods/127f2e41-56a9-49d5-af60-577d5b3d7093/volumes" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.881264 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.887991 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.893033 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-scripts\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.893647 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-config-data\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.904015 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6tgm\" (UniqueName: \"kubernetes.io/projected/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-kube-api-access-l6tgm\") pod \"ceilometer-0\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") " pod="openstack/ceilometer-0" Jan 28 18:51:48 crc kubenswrapper[4767]: I0128 18:51:48.937440 4767 scope.go:117] "RemoveContainer" containerID="b977c05fb98503d8bf37aca91ef7a3c6ba4f43590cb7c954d8333d4b458cdcf4" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.021633 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-7fb965dcbf-xj95b"] Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.061380 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.459863 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-cfnapi-dc6d99b6-mtv24"] Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.483890 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-757c589dd9-zvtp6"] Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.522273 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/heat-api-68db597dbc-btcng"] Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.524717 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-66c6598f9f-9w6r9" event={"ID":"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc","Type":"ContainerStarted","Data":"c065f4f57e5b5f65944b57339dbe304d8275cbc51f112de97d46652d58070368"} Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.537177 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-75f5487b88-96qvr" event={"ID":"79f79643-bd94-43a4-9be8-98513b220314","Type":"ContainerStarted","Data":"534df612fefe1547624b9377dc4dd2d1cde4271b8cf5d382dd7467e12cbee9f0"} Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.543182 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1","Type":"ContainerStarted","Data":"72b28aebca58d31b790efe757402aa16fa1a7dfb5722945d2ab0a9629f7c4ce1"} Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.582987 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" event={"ID":"0b974209-d851-443b-88b4-868e5564e0fb","Type":"ContainerStarted","Data":"a8ee630dd85b83a9241d0d5804042fdf384009edfcff75224f1328eddf8d1d0e"} Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.591563 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-56cbd47b65-wxqrt" event={"ID":"f385fd5b-9b44-4d64-b9a5-39ffddab1c34","Type":"ContainerStarted","Data":"156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9"} Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.591777 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-cfnapi-56cbd47b65-wxqrt" podUID="f385fd5b-9b44-4d64-b9a5-39ffddab1c34" containerName="heat-cfnapi" containerID="cri-o://156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9" gracePeriod=60 Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.591892 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-56cbd47b65-wxqrt" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.592527 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-d9w94"] Jan 28 18:51:49 crc kubenswrapper[4767]: W0128 18:51:49.603688 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ace2f10_9cec_4091_a68f_7680d0f282fc.slice/crio-87e9d13a8f7a0698b2593edfdb6379c766e821115218e510f6202716ada44cbc WatchSource:0}: Error finding container 87e9d13a8f7a0698b2593edfdb6379c766e821115218e510f6202716ada44cbc: Status 404 returned error can't find the container with id 87e9d13a8f7a0698b2593edfdb6379c766e821115218e510f6202716ada44cbc Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.635495 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-678958868f-sc9dm"] Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 
18:51:49.652605 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.658160 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.658465 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.725020 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-public-tls-certs\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.725105 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-combined-ca-bundle\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.725136 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-config\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.725222 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-ovndb-tls-certs\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.725257 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfjj7\" (UniqueName: \"kubernetes.io/projected/62702b24-3b22-4f62-8c70-e14a8749be55-kube-api-access-mfjj7\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.725288 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-httpd-config\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.725311 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-internal-tls-certs\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.735036 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=3.375088366 podStartE2EDuration="27.735008058s" podCreationTimestamp="2026-01-28 18:51:22 +0000 
UTC" firstStartedPulling="2026-01-28 18:51:23.447988715 +0000 UTC m=+1289.412171589" lastFinishedPulling="2026-01-28 18:51:47.807908407 +0000 UTC m=+1313.772091281" observedRunningTime="2026-01-28 18:51:49.567434975 +0000 UTC m=+1315.531617859" watchObservedRunningTime="2026-01-28 18:51:49.735008058 +0000 UTC m=+1315.699190942" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.739309 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-678958868f-sc9dm"] Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.767639 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-56cbd47b65-wxqrt" podStartSLOduration=3.95633514 podStartE2EDuration="16.767602226s" podCreationTimestamp="2026-01-28 18:51:33 +0000 UTC" firstStartedPulling="2026-01-28 18:51:35.006042614 +0000 UTC m=+1300.970225488" lastFinishedPulling="2026-01-28 18:51:47.8173097 +0000 UTC m=+1313.781492574" observedRunningTime="2026-01-28 18:51:49.616325262 +0000 UTC m=+1315.580508156" watchObservedRunningTime="2026-01-28 18:51:49.767602226 +0000 UTC m=+1315.731785100" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.830358 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-public-tls-certs\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.830460 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-combined-ca-bundle\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.830504 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-config\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.830621 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-ovndb-tls-certs\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.830667 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfjj7\" (UniqueName: \"kubernetes.io/projected/62702b24-3b22-4f62-8c70-e14a8749be55-kube-api-access-mfjj7\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.830712 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-httpd-config\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.830745 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-internal-tls-certs\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.852356 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-httpd-config\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.853731 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-public-tls-certs\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.855793 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-ovndb-tls-certs\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.859710 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-internal-tls-certs\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.860514 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfjj7\" (UniqueName: \"kubernetes.io/projected/62702b24-3b22-4f62-8c70-e14a8749be55-kube-api-access-mfjj7\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.861972 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-config\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.862192 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/62702b24-3b22-4f62-8c70-e14a8749be55-combined-ca-bundle\") pod \"neutron-678958868f-sc9dm\" (UID: \"62702b24-3b22-4f62-8c70-e14a8749be55\") " pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:49 crc kubenswrapper[4767]: I0128 18:51:49.863714 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6b85bcdd44-j7nc5"] Jan 28 18:51:49 crc kubenswrapper[4767]: W0128 18:51:49.882337 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbb57e040_6449_4416_87a1_776751f75752.slice/crio-a5f0a72c454ef5abb176011aafa794d0e59a46a22a7bf0e4f97538ff9b6bf5c5 WatchSource:0}: Error finding container a5f0a72c454ef5abb176011aafa794d0e59a46a22a7bf0e4f97538ff9b6bf5c5: Status 404 returned error can't find the container with id a5f0a72c454ef5abb176011aafa794d0e59a46a22a7bf0e4f97538ff9b6bf5c5 
Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.064587 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.077515 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6cd865bf84-w9w7d" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.130059 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.250347 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wqrt\" (UniqueName: \"kubernetes.io/projected/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-kube-api-access-2wqrt\") pod \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.250779 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-config-data-custom\") pod \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.251498 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-combined-ca-bundle\") pod \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.251653 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-config-data\") pod \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\" (UID: \"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9\") " Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.263121 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-config-data" (OuterVolumeSpecName: "config-data") pod "1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9" (UID: "1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.268533 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9" (UID: "1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.278545 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9" (UID: "1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.286432 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-kube-api-access-2wqrt" (OuterVolumeSpecName: "kube-api-access-2wqrt") pod "1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9" (UID: "1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9"). InnerVolumeSpecName "kube-api-access-2wqrt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.355630 4767 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.355686 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.355702 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.355718 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wqrt\" (UniqueName: \"kubernetes.io/projected/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9-kube-api-access-2wqrt\") on node \"crc\" DevicePath \"\"" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.738666 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" event={"ID":"0b974209-d851-443b-88b4-868e5564e0fb","Type":"ContainerStarted","Data":"67bf018056f37c3754b8174b524642b5134ac79c212c20d251f77093b02c768a"} Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.741198 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.760983 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b85bcdd44-j7nc5" event={"ID":"bb57e040-6449-4416-87a1-776751f75752","Type":"ContainerStarted","Data":"dba4659dc85b26e12ae70c25ded3b5737cc1016d59e2c35f465db8f18ca60614"} Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.761470 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b85bcdd44-j7nc5" event={"ID":"bb57e040-6449-4416-87a1-776751f75752","Type":"ContainerStarted","Data":"a5f0a72c454ef5abb176011aafa794d0e59a46a22a7bf0e4f97538ff9b6bf5c5"} Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.775923 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-66c6598f9f-9w6r9" event={"ID":"85ceb5d8-a7fe-4e66-a20f-6a309942c1fc","Type":"ContainerStarted","Data":"3254e1922db1c7b27806f7dad5543c42b201b9e7c17022505f256bf54e23adb6"} Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.776393 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.777422 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.849867 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/heat-cfnapi-7fb965dcbf-xj95b" podStartSLOduration=8.849833093 podStartE2EDuration="8.849833093s" podCreationTimestamp="2026-01-28 18:51:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:50.790768489 +0000 UTC m=+1316.754951393" watchObservedRunningTime="2026-01-28 18:51:50.849833093 +0000 UTC m=+1316.814015977" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.857892 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-6cd865bf84-w9w7d" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.867706 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-66c6598f9f-9w6r9" podStartSLOduration=15.86765972 podStartE2EDuration="15.86765972s" podCreationTimestamp="2026-01-28 18:51:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:50.841161052 +0000 UTC m=+1316.805343926" watchObservedRunningTime="2026-01-28 18:51:50.86765972 +0000 UTC m=+1316.831842594" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.897265 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-engine-75f5487b88-96qvr" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.897312 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-75f5487b88-96qvr" event={"ID":"79f79643-bd94-43a4-9be8-98513b220314","Type":"ContainerStarted","Data":"85df5f6607b91bf40f4d6a87f3e5d83c6827623e99713a34c497f058778633b1"} Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.897353 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-6cd865bf84-w9w7d" event={"ID":"1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9","Type":"ContainerDied","Data":"0ca537347dc8d09e34d38285fa3794920e9c138c5c49beec74f94c96c100d7b3"} Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.897375 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff","Type":"ContainerStarted","Data":"b5d699a5730055140918b2b7d7d675dbc0f995d6eed898e49438d77aa0ed14fa"} Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.900422 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-757c589dd9-zvtp6" event={"ID":"a77e5f05-fa4b-4899-acca-36efb0710320","Type":"ContainerStarted","Data":"102fd1f3aaea8bc8ffffe148018b6d3f3cbf8c348d759846d33a050e3ed74f4b"} Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.910753 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-engine-75f5487b88-96qvr" podStartSLOduration=10.910726555 podStartE2EDuration="10.910726555s" podCreationTimestamp="2026-01-28 18:51:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:50.884492746 +0000 UTC m=+1316.848675620" watchObservedRunningTime="2026-01-28 18:51:50.910726555 +0000 UTC m=+1316.874909429" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.921634 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" event={"ID":"b31daada-81d3-4cfa-bf1d-8d05fc34f82b","Type":"ContainerStarted","Data":"ae4aac87a9b2937d1a7abed809cad66685a36e9496503e90e2b4071ee3a81319"} Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.921704 4767 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" event={"ID":"b31daada-81d3-4cfa-bf1d-8d05fc34f82b","Type":"ContainerStarted","Data":"7945b14c3e2e61bc9709514f981385d51c15a9478b85ca93db0802b33f4457ba"} Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.922588 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.940970 4767 generic.go:334] "Generic (PLEG): container finished" podID="530c6c66-df17-4d54-b254-b0bf9e860545" containerID="239c3b23b873dc64c0959eb0ad4990ff3fed56c07eaf7b37778cd118361de5e0" exitCode=0 Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.941351 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" event={"ID":"530c6c66-df17-4d54-b254-b0bf9e860545","Type":"ContainerDied","Data":"239c3b23b873dc64c0959eb0ad4990ff3fed56c07eaf7b37778cd118361de5e0"} Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.942357 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" event={"ID":"530c6c66-df17-4d54-b254-b0bf9e860545","Type":"ContainerStarted","Data":"72a62254ee09764951b982197eeeddfbb8bd1dcd62b140a3c6a7f4c1d09ca886"} Jan 28 18:51:50 crc kubenswrapper[4767]: I0128 18:51:50.949769 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-68db597dbc-btcng" event={"ID":"1ace2f10-9cec-4091-a68f-7680d0f282fc","Type":"ContainerStarted","Data":"87e9d13a8f7a0698b2593edfdb6379c766e821115218e510f6202716ada44cbc"} Jan 28 18:51:51 crc kubenswrapper[4767]: I0128 18:51:51.074779 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" podStartSLOduration=11.073539759 podStartE2EDuration="11.073539759s" podCreationTimestamp="2026-01-28 18:51:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:50.971868845 +0000 UTC m=+1316.936051739" watchObservedRunningTime="2026-01-28 18:51:51.073539759 +0000 UTC m=+1317.037722633" Jan 28 18:51:51 crc kubenswrapper[4767]: I0128 18:51:51.220389 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-6cd865bf84-w9w7d"] Jan 28 18:51:51 crc kubenswrapper[4767]: I0128 18:51:51.232013 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-6cd865bf84-w9w7d"] Jan 28 18:51:51 crc kubenswrapper[4767]: I0128 18:51:51.243119 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-678958868f-sc9dm"] Jan 28 18:51:51 crc kubenswrapper[4767]: I0128 18:51:51.979896 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" event={"ID":"530c6c66-df17-4d54-b254-b0bf9e860545","Type":"ContainerStarted","Data":"86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528"} Jan 28 18:51:51 crc kubenswrapper[4767]: I0128 18:51:51.980371 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:51 crc kubenswrapper[4767]: I0128 18:51:51.993949 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-68db597dbc-btcng" event={"ID":"1ace2f10-9cec-4091-a68f-7680d0f282fc","Type":"ContainerStarted","Data":"d5ed4294f9fb02b690f83eb59e66aa40c8d983f993cde7021628805788006cd7"} Jan 28 18:51:51 crc kubenswrapper[4767]: I0128 18:51:51.995157 4767 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-68db597dbc-btcng" Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.000431 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-678958868f-sc9dm" event={"ID":"62702b24-3b22-4f62-8c70-e14a8749be55","Type":"ContainerStarted","Data":"8a90184557cb8cd9f0ea43ff4c97beeaa8518b4bd6f4707b394e9051edff1401"} Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.001729 4767 generic.go:334] "Generic (PLEG): container finished" podID="a77e5f05-fa4b-4899-acca-36efb0710320" containerID="41e57127d0bebf0a0db058d54d4fd976bc1dde2b77ae49429e9524b88f483185" exitCode=1 Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.001778 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-757c589dd9-zvtp6" event={"ID":"a77e5f05-fa4b-4899-acca-36efb0710320","Type":"ContainerDied","Data":"41e57127d0bebf0a0db058d54d4fd976bc1dde2b77ae49429e9524b88f483185"} Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.002520 4767 scope.go:117] "RemoveContainer" containerID="41e57127d0bebf0a0db058d54d4fd976bc1dde2b77ae49429e9524b88f483185" Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.015402 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" podStartSLOduration=6.015371912 podStartE2EDuration="6.015371912s" podCreationTimestamp="2026-01-28 18:51:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:52.012380859 +0000 UTC m=+1317.976563743" watchObservedRunningTime="2026-01-28 18:51:52.015371912 +0000 UTC m=+1317.979554786" Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.054692 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b85bcdd44-j7nc5" event={"ID":"bb57e040-6449-4416-87a1-776751f75752","Type":"ContainerStarted","Data":"30f6cba37c598c6e0290d4657113db77f14c9572cdd524b80f3cae6218895496"} Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.055680 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.056958 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-68db597dbc-btcng" podStartSLOduration=9.452239076 podStartE2EDuration="10.0569281s" podCreationTimestamp="2026-01-28 18:51:42 +0000 UTC" firstStartedPulling="2026-01-28 18:51:49.616486487 +0000 UTC m=+1315.580669351" lastFinishedPulling="2026-01-28 18:51:50.221175501 +0000 UTC m=+1316.185358375" observedRunningTime="2026-01-28 18:51:52.054345369 +0000 UTC m=+1318.018528243" watchObservedRunningTime="2026-01-28 18:51:52.0569281 +0000 UTC m=+1318.021110974" Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.076019 4767 generic.go:334] "Generic (PLEG): container finished" podID="b31daada-81d3-4cfa-bf1d-8d05fc34f82b" containerID="ae4aac87a9b2937d1a7abed809cad66685a36e9496503e90e2b4071ee3a81319" exitCode=1 Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.076161 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" event={"ID":"b31daada-81d3-4cfa-bf1d-8d05fc34f82b","Type":"ContainerDied","Data":"ae4aac87a9b2937d1a7abed809cad66685a36e9496503e90e2b4071ee3a81319"} Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.079022 4767 scope.go:117] "RemoveContainer" 
containerID="ae4aac87a9b2937d1a7abed809cad66685a36e9496503e90e2b4071ee3a81319" Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.095511 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff","Type":"ContainerStarted","Data":"2e89e42108b5d94bbe9a6e5655bf4d3defed609495f59d702d3d6454336b75cf"} Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.171906 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6b85bcdd44-j7nc5" podStartSLOduration=6.17187632 podStartE2EDuration="6.17187632s" podCreationTimestamp="2026-01-28 18:51:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:52.151969788 +0000 UTC m=+1318.116152662" watchObservedRunningTime="2026-01-28 18:51:52.17187632 +0000 UTC m=+1318.136059204" Jan 28 18:51:52 crc kubenswrapper[4767]: I0128 18:51:52.810378 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9" path="/var/lib/kubelet/pods/1c05cf04-9e30-4aa2-b2ad-2aadc34a10c9/volumes" Jan 28 18:51:53 crc kubenswrapper[4767]: I0128 18:51:53.117899 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-757c589dd9-zvtp6" event={"ID":"a77e5f05-fa4b-4899-acca-36efb0710320","Type":"ContainerStarted","Data":"1d569ad00add7510c417d36e4b587a21d5e80bb4200e5cc7f75a4f819d427638"} Jan 28 18:51:53 crc kubenswrapper[4767]: I0128 18:51:53.118067 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:53 crc kubenswrapper[4767]: I0128 18:51:53.122169 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" event={"ID":"b31daada-81d3-4cfa-bf1d-8d05fc34f82b","Type":"ContainerStarted","Data":"88f654d48f170056c7d6f5aa5156853d6c13e4aea8dabe452034613e52773d8c"} Jan 28 18:51:53 crc kubenswrapper[4767]: I0128 18:51:53.122365 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:53 crc kubenswrapper[4767]: I0128 18:51:53.126732 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff","Type":"ContainerStarted","Data":"52e4e9bfdd70ecb2807469e043b7c4b79cbcc838f81fff3ef3f3c6bbff2227d3"} Jan 28 18:51:53 crc kubenswrapper[4767]: I0128 18:51:53.139649 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-678958868f-sc9dm" event={"ID":"62702b24-3b22-4f62-8c70-e14a8749be55","Type":"ContainerStarted","Data":"775bfb34fdf2012d5587b5946857ac7fc95204adb686531e6d8f3420a9cac071"} Jan 28 18:51:53 crc kubenswrapper[4767]: I0128 18:51:53.139726 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-678958868f-sc9dm" event={"ID":"62702b24-3b22-4f62-8c70-e14a8749be55","Type":"ContainerStarted","Data":"a23040e06523c8c6439f6252783e8e63acc97e28de0f3753a35d1dda41c2b50c"} Jan 28 18:51:53 crc kubenswrapper[4767]: I0128 18:51:53.142513 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:51:53 crc kubenswrapper[4767]: I0128 18:51:53.154818 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/heat-api-757c589dd9-zvtp6" podStartSLOduration=12.458350597 podStartE2EDuration="13.154779845s" podCreationTimestamp="2026-01-28 18:51:40 +0000 
UTC" firstStartedPulling="2026-01-28 18:51:49.582494426 +0000 UTC m=+1315.546677300" lastFinishedPulling="2026-01-28 18:51:50.278923674 +0000 UTC m=+1316.243106548" observedRunningTime="2026-01-28 18:51:53.142835552 +0000 UTC m=+1319.107018436" watchObservedRunningTime="2026-01-28 18:51:53.154779845 +0000 UTC m=+1319.118962729" Jan 28 18:51:53 crc kubenswrapper[4767]: I0128 18:51:53.166882 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:53 crc kubenswrapper[4767]: I0128 18:51:53.218197 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-678958868f-sc9dm" podStartSLOduration=4.218171905 podStartE2EDuration="4.218171905s" podCreationTimestamp="2026-01-28 18:51:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:51:53.176006798 +0000 UTC m=+1319.140189682" watchObservedRunningTime="2026-01-28 18:51:53.218171905 +0000 UTC m=+1319.182354779" Jan 28 18:51:53 crc kubenswrapper[4767]: I0128 18:51:53.695409 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-59d5689489-z5swl" Jan 28 18:51:54 crc kubenswrapper[4767]: I0128 18:51:54.154841 4767 generic.go:334] "Generic (PLEG): container finished" podID="a77e5f05-fa4b-4899-acca-36efb0710320" containerID="1d569ad00add7510c417d36e4b587a21d5e80bb4200e5cc7f75a4f819d427638" exitCode=1 Jan 28 18:51:54 crc kubenswrapper[4767]: I0128 18:51:54.154959 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-757c589dd9-zvtp6" event={"ID":"a77e5f05-fa4b-4899-acca-36efb0710320","Type":"ContainerDied","Data":"1d569ad00add7510c417d36e4b587a21d5e80bb4200e5cc7f75a4f819d427638"} Jan 28 18:51:54 crc kubenswrapper[4767]: I0128 18:51:54.155102 4767 scope.go:117] "RemoveContainer" containerID="41e57127d0bebf0a0db058d54d4fd976bc1dde2b77ae49429e9524b88f483185" Jan 28 18:51:54 crc kubenswrapper[4767]: I0128 18:51:54.155765 4767 scope.go:117] "RemoveContainer" containerID="1d569ad00add7510c417d36e4b587a21d5e80bb4200e5cc7f75a4f819d427638" Jan 28 18:51:54 crc kubenswrapper[4767]: E0128 18:51:54.156290 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-757c589dd9-zvtp6_openstack(a77e5f05-fa4b-4899-acca-36efb0710320)\"" pod="openstack/heat-api-757c589dd9-zvtp6" podUID="a77e5f05-fa4b-4899-acca-36efb0710320" Jan 28 18:51:54 crc kubenswrapper[4767]: I0128 18:51:54.159468 4767 generic.go:334] "Generic (PLEG): container finished" podID="b31daada-81d3-4cfa-bf1d-8d05fc34f82b" containerID="88f654d48f170056c7d6f5aa5156853d6c13e4aea8dabe452034613e52773d8c" exitCode=1 Jan 28 18:51:54 crc kubenswrapper[4767]: I0128 18:51:54.159579 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" event={"ID":"b31daada-81d3-4cfa-bf1d-8d05fc34f82b","Type":"ContainerDied","Data":"88f654d48f170056c7d6f5aa5156853d6c13e4aea8dabe452034613e52773d8c"} Jan 28 18:51:54 crc kubenswrapper[4767]: I0128 18:51:54.160174 4767 scope.go:117] "RemoveContainer" containerID="88f654d48f170056c7d6f5aa5156853d6c13e4aea8dabe452034613e52773d8c" Jan 28 18:51:54 crc kubenswrapper[4767]: E0128 18:51:54.160476 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s 
restarting failed container=heat-cfnapi pod=heat-cfnapi-dc6d99b6-mtv24_openstack(b31daada-81d3-4cfa-bf1d-8d05fc34f82b)\"" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" podUID="b31daada-81d3-4cfa-bf1d-8d05fc34f82b" Jan 28 18:51:54 crc kubenswrapper[4767]: I0128 18:51:54.182649 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff","Type":"ContainerStarted","Data":"bde4fa2000377a13dce2f240f6780ceadad58e8b800b36a2ae256d2acf47b7f2"} Jan 28 18:51:54 crc kubenswrapper[4767]: I0128 18:51:54.208829 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-66c6598f9f-9w6r9" Jan 28 18:51:54 crc kubenswrapper[4767]: I0128 18:51:54.393987 4767 scope.go:117] "RemoveContainer" containerID="ae4aac87a9b2937d1a7abed809cad66685a36e9496503e90e2b4071ee3a81319" Jan 28 18:51:55 crc kubenswrapper[4767]: I0128 18:51:55.214879 4767 scope.go:117] "RemoveContainer" containerID="1d569ad00add7510c417d36e4b587a21d5e80bb4200e5cc7f75a4f819d427638" Jan 28 18:51:55 crc kubenswrapper[4767]: E0128 18:51:55.215606 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-757c589dd9-zvtp6_openstack(a77e5f05-fa4b-4899-acca-36efb0710320)\"" pod="openstack/heat-api-757c589dd9-zvtp6" podUID="a77e5f05-fa4b-4899-acca-36efb0710320" Jan 28 18:51:55 crc kubenswrapper[4767]: I0128 18:51:55.216578 4767 scope.go:117] "RemoveContainer" containerID="88f654d48f170056c7d6f5aa5156853d6c13e4aea8dabe452034613e52773d8c" Jan 28 18:51:55 crc kubenswrapper[4767]: E0128 18:51:55.217810 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-dc6d99b6-mtv24_openstack(b31daada-81d3-4cfa-bf1d-8d05fc34f82b)\"" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" podUID="b31daada-81d3-4cfa-bf1d-8d05fc34f82b" Jan 28 18:51:55 crc kubenswrapper[4767]: I0128 18:51:55.493282 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-api-757c589dd9-zvtp6" Jan 28 18:51:55 crc kubenswrapper[4767]: I0128 18:51:55.542298 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" Jan 28 18:51:55 crc kubenswrapper[4767]: I0128 18:51:55.993606 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-56cbd47b65-wxqrt" Jan 28 18:51:56 crc kubenswrapper[4767]: I0128 18:51:56.231328 4767 scope.go:117] "RemoveContainer" containerID="88f654d48f170056c7d6f5aa5156853d6c13e4aea8dabe452034613e52773d8c" Jan 28 18:51:56 crc kubenswrapper[4767]: I0128 18:51:56.231635 4767 scope.go:117] "RemoveContainer" containerID="1d569ad00add7510c417d36e4b587a21d5e80bb4200e5cc7f75a4f819d427638" Jan 28 18:51:56 crc kubenswrapper[4767]: E0128 18:51:56.231942 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"heat-api\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-api pod=heat-api-757c589dd9-zvtp6_openstack(a77e5f05-fa4b-4899-acca-36efb0710320)\"" pod="openstack/heat-api-757c589dd9-zvtp6" podUID="a77e5f05-fa4b-4899-acca-36efb0710320" Jan 28 18:51:56 crc kubenswrapper[4767]: E0128 18:51:56.232455 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"heat-cfnapi\" with CrashLoopBackOff: \"back-off 10s restarting failed container=heat-cfnapi pod=heat-cfnapi-dc6d99b6-mtv24_openstack(b31daada-81d3-4cfa-bf1d-8d05fc34f82b)\"" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" podUID="b31daada-81d3-4cfa-bf1d-8d05fc34f82b" Jan 28 18:51:57 crc kubenswrapper[4767]: I0128 18:51:57.182508 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" Jan 28 18:51:57 crc kubenswrapper[4767]: I0128 18:51:57.367097 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"] Jan 28 18:51:57 crc kubenswrapper[4767]: I0128 18:51:57.367584 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm" podUID="1a40cf4b-e422-4f81-a8b6-6f202a809438" containerName="dnsmasq-dns" containerID="cri-o://8456cdd09ab332083994f954e13921f2ca1748bf6b9b10ac39da74ce7992b8f4" gracePeriod=10 Jan 28 18:51:58 crc kubenswrapper[4767]: I0128 18:51:58.404137 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:51:58 crc kubenswrapper[4767]: I0128 18:51:58.759886 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm" podUID="1a40cf4b-e422-4f81-a8b6-6f202a809438" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.166:5353: connect: connection refused" Jan 28 18:51:59 crc kubenswrapper[4767]: I0128 18:51:59.299679 4767 generic.go:334] "Generic (PLEG): container finished" podID="1a40cf4b-e422-4f81-a8b6-6f202a809438" containerID="8456cdd09ab332083994f954e13921f2ca1748bf6b9b10ac39da74ce7992b8f4" exitCode=0 Jan 28 18:51:59 crc kubenswrapper[4767]: I0128 18:51:59.299740 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm" event={"ID":"1a40cf4b-e422-4f81-a8b6-6f202a809438","Type":"ContainerDied","Data":"8456cdd09ab332083994f954e13921f2ca1748bf6b9b10ac39da74ce7992b8f4"} Jan 28 18:51:59 crc kubenswrapper[4767]: I0128 18:51:59.967988 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm" Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.081762 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-config\") pod \"1a40cf4b-e422-4f81-a8b6-6f202a809438\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.082738 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-ovsdbserver-sb\") pod \"1a40cf4b-e422-4f81-a8b6-6f202a809438\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.082767 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgwk4\" (UniqueName: \"kubernetes.io/projected/1a40cf4b-e422-4f81-a8b6-6f202a809438-kube-api-access-mgwk4\") pod \"1a40cf4b-e422-4f81-a8b6-6f202a809438\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.082822 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-dns-swift-storage-0\") pod \"1a40cf4b-e422-4f81-a8b6-6f202a809438\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.082843 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-ovsdbserver-nb\") pod \"1a40cf4b-e422-4f81-a8b6-6f202a809438\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.082883 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-dns-svc\") pod \"1a40cf4b-e422-4f81-a8b6-6f202a809438\" (UID: \"1a40cf4b-e422-4f81-a8b6-6f202a809438\") " Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.105029 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a40cf4b-e422-4f81-a8b6-6f202a809438-kube-api-access-mgwk4" (OuterVolumeSpecName: "kube-api-access-mgwk4") pod "1a40cf4b-e422-4f81-a8b6-6f202a809438" (UID: "1a40cf4b-e422-4f81-a8b6-6f202a809438"). InnerVolumeSpecName "kube-api-access-mgwk4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.192361 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgwk4\" (UniqueName: \"kubernetes.io/projected/1a40cf4b-e422-4f81-a8b6-6f202a809438-kube-api-access-mgwk4\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.228302 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-config" (OuterVolumeSpecName: "config") pod "1a40cf4b-e422-4f81-a8b6-6f202a809438" (UID: "1a40cf4b-e422-4f81-a8b6-6f202a809438"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.280519 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1a40cf4b-e422-4f81-a8b6-6f202a809438" (UID: "1a40cf4b-e422-4f81-a8b6-6f202a809438"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.283835 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1a40cf4b-e422-4f81-a8b6-6f202a809438" (UID: "1a40cf4b-e422-4f81-a8b6-6f202a809438"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.294959 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.295003 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.295015 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.301651 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1a40cf4b-e422-4f81-a8b6-6f202a809438" (UID: "1a40cf4b-e422-4f81-a8b6-6f202a809438"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.303950 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1a40cf4b-e422-4f81-a8b6-6f202a809438" (UID: "1a40cf4b-e422-4f81-a8b6-6f202a809438"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.317810 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm" event={"ID":"1a40cf4b-e422-4f81-a8b6-6f202a809438","Type":"ContainerDied","Data":"21b8627b30476179d57336ea20473869f2a287682f43a3bd1ad2578e7af5efb0"} Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.317883 4767 scope.go:117] "RemoveContainer" containerID="8456cdd09ab332083994f954e13921f2ca1748bf6b9b10ac39da74ce7992b8f4" Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.318088 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"
Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.344924 4767 scope.go:117] "RemoveContainer" containerID="35f1c8c3820593ab7a98b6298a1097ecd4f1633f28326e42421804ca1ada8c75"
Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.371299 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"]
Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.397195 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.397260 4767 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1a40cf4b-e422-4f81-a8b6-6f202a809438-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.399598 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f8cd4d6dc-bpnbm"]
Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.473685 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-engine-75f5487b88-96qvr"
Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.543121 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-59d5689489-z5swl"]
Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.543440 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/heat-engine-59d5689489-z5swl" podUID="33e5dd4b-468d-4a50-9429-1faba885d020" containerName="heat-engine" containerID="cri-o://56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4" gracePeriod=60
Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.809032 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a40cf4b-e422-4f81-a8b6-6f202a809438" path="/var/lib/kubelet/pods/1a40cf4b-e422-4f81-a8b6-6f202a809438/volumes"
Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.833765 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-api-68db597dbc-btcng"
Jan 28 18:52:00 crc kubenswrapper[4767]: I0128 18:52:00.912844 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-757c589dd9-zvtp6"]
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.028883 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/heat-cfnapi-7fb965dcbf-xj95b"
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.137312 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-dc6d99b6-mtv24"]
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.372462 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff","Type":"ContainerStarted","Data":"6203742ef0dba2555eaff2ba006939523891961f19cc050f441458495060bfd1"}
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.372669 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="ceilometer-central-agent" containerID="cri-o://2e89e42108b5d94bbe9a6e5655bf4d3defed609495f59d702d3d6454336b75cf" gracePeriod=30
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.373077 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.373410 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="proxy-httpd" containerID="cri-o://6203742ef0dba2555eaff2ba006939523891961f19cc050f441458495060bfd1" gracePeriod=30
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.373459 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="sg-core" containerID="cri-o://bde4fa2000377a13dce2f240f6780ceadad58e8b800b36a2ae256d2acf47b7f2" gracePeriod=30
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.373494 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="ceilometer-notification-agent" containerID="cri-o://52e4e9bfdd70ecb2807469e043b7c4b79cbcc838f81fff3ef3f3c6bbff2227d3" gracePeriod=30
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.432648 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.997117701 podStartE2EDuration="13.432611475s" podCreationTimestamp="2026-01-28 18:51:48 +0000 UTC" firstStartedPulling="2026-01-28 18:51:50.170307622 +0000 UTC m=+1316.134490496" lastFinishedPulling="2026-01-28 18:51:59.605801396 +0000 UTC m=+1325.569984270" observedRunningTime="2026-01-28 18:52:01.410798495 +0000 UTC m=+1327.374981369" watchObservedRunningTime="2026-01-28 18:52:01.432611475 +0000 UTC m=+1327.396794349"
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.538335 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.538616 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="1dd0d0af-4059-48d5-9251-34c7f04df1c3" containerName="glance-log" containerID="cri-o://bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45" gracePeriod=30
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.538854 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="1dd0d0af-4059-48d5-9251-34c7f04df1c3" containerName="glance-httpd" containerID="cri-o://a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2" gracePeriod=30
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.729584 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-757c589dd9-zvtp6"
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.755582 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mk294\" (UniqueName: \"kubernetes.io/projected/a77e5f05-fa4b-4899-acca-36efb0710320-kube-api-access-mk294\") pod \"a77e5f05-fa4b-4899-acca-36efb0710320\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") "
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.755708 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-combined-ca-bundle\") pod \"a77e5f05-fa4b-4899-acca-36efb0710320\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") "
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.755806 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-config-data-custom\") pod \"a77e5f05-fa4b-4899-acca-36efb0710320\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") "
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.755923 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-config-data\") pod \"a77e5f05-fa4b-4899-acca-36efb0710320\" (UID: \"a77e5f05-fa4b-4899-acca-36efb0710320\") "
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.771388 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "a77e5f05-fa4b-4899-acca-36efb0710320" (UID: "a77e5f05-fa4b-4899-acca-36efb0710320"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.773869 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a77e5f05-fa4b-4899-acca-36efb0710320-kube-api-access-mk294" (OuterVolumeSpecName: "kube-api-access-mk294") pod "a77e5f05-fa4b-4899-acca-36efb0710320" (UID: "a77e5f05-fa4b-4899-acca-36efb0710320"). InnerVolumeSpecName "kube-api-access-mk294". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.843885 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a77e5f05-fa4b-4899-acca-36efb0710320" (UID: "a77e5f05-fa4b-4899-acca-36efb0710320"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.860484 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mk294\" (UniqueName: \"kubernetes.io/projected/a77e5f05-fa4b-4899-acca-36efb0710320-kube-api-access-mk294\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.860833 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.860935 4767 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.934892 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-config-data" (OuterVolumeSpecName: "config-data") pod "a77e5f05-fa4b-4899-acca-36efb0710320" (UID: "a77e5f05-fa4b-4899-acca-36efb0710320"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:01 crc kubenswrapper[4767]: I0128 18:52:01.963164 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a77e5f05-fa4b-4899-acca-36efb0710320-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.006184 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-dc6d99b6-mtv24"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.064849 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-combined-ca-bundle\") pod \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") "
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.065416 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-config-data-custom\") pod \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") "
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.065517 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8flx\" (UniqueName: \"kubernetes.io/projected/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-kube-api-access-h8flx\") pod \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") "
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.065840 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-config-data\") pod \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\" (UID: \"b31daada-81d3-4cfa-bf1d-8d05fc34f82b\") "
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.069926 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "b31daada-81d3-4cfa-bf1d-8d05fc34f82b" (UID: "b31daada-81d3-4cfa-bf1d-8d05fc34f82b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.071700 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-kube-api-access-h8flx" (OuterVolumeSpecName: "kube-api-access-h8flx") pod "b31daada-81d3-4cfa-bf1d-8d05fc34f82b" (UID: "b31daada-81d3-4cfa-bf1d-8d05fc34f82b"). InnerVolumeSpecName "kube-api-access-h8flx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.102338 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b31daada-81d3-4cfa-bf1d-8d05fc34f82b" (UID: "b31daada-81d3-4cfa-bf1d-8d05fc34f82b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.132704 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-config-data" (OuterVolumeSpecName: "config-data") pod "b31daada-81d3-4cfa-bf1d-8d05fc34f82b" (UID: "b31daada-81d3-4cfa-bf1d-8d05fc34f82b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.169421 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8flx\" (UniqueName: \"kubernetes.io/projected/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-kube-api-access-h8flx\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.169495 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.170511 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.170567 4767 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b31daada-81d3-4cfa-bf1d-8d05fc34f82b-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.453160 4767 generic.go:334] "Generic (PLEG): container finished" podID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerID="6203742ef0dba2555eaff2ba006939523891961f19cc050f441458495060bfd1" exitCode=0
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.453308 4767 generic.go:334] "Generic (PLEG): container finished" podID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerID="bde4fa2000377a13dce2f240f6780ceadad58e8b800b36a2ae256d2acf47b7f2" exitCode=2
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.453327 4767 generic.go:334] "Generic (PLEG): container finished" podID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerID="52e4e9bfdd70ecb2807469e043b7c4b79cbcc838f81fff3ef3f3c6bbff2227d3" exitCode=0
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.453337 4767 generic.go:334] "Generic (PLEG): container finished" podID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerID="2e89e42108b5d94bbe9a6e5655bf4d3defed609495f59d702d3d6454336b75cf" exitCode=0
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.453406 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff","Type":"ContainerDied","Data":"6203742ef0dba2555eaff2ba006939523891961f19cc050f441458495060bfd1"}
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.453441 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff","Type":"ContainerDied","Data":"bde4fa2000377a13dce2f240f6780ceadad58e8b800b36a2ae256d2acf47b7f2"}
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.453454 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff","Type":"ContainerDied","Data":"52e4e9bfdd70ecb2807469e043b7c4b79cbcc838f81fff3ef3f3c6bbff2227d3"}
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.453464 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff","Type":"ContainerDied","Data":"2e89e42108b5d94bbe9a6e5655bf4d3defed609495f59d702d3d6454336b75cf"}
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.455106 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-api-757c589dd9-zvtp6" event={"ID":"a77e5f05-fa4b-4899-acca-36efb0710320","Type":"ContainerDied","Data":"102fd1f3aaea8bc8ffffe148018b6d3f3cbf8c348d759846d33a050e3ed74f4b"}
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.455139 4767 scope.go:117] "RemoveContainer" containerID="1d569ad00add7510c417d36e4b587a21d5e80bb4200e5cc7f75a4f819d427638"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.455270 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-api-757c589dd9-zvtp6"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.475928 4767 generic.go:334] "Generic (PLEG): container finished" podID="1dd0d0af-4059-48d5-9251-34c7f04df1c3" containerID="bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45" exitCode=143
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.476065 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1dd0d0af-4059-48d5-9251-34c7f04df1c3","Type":"ContainerDied","Data":"bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45"}
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.492639 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-dc6d99b6-mtv24" event={"ID":"b31daada-81d3-4cfa-bf1d-8d05fc34f82b","Type":"ContainerDied","Data":"7945b14c3e2e61bc9709514f981385d51c15a9478b85ca93db0802b33f4457ba"}
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.492935 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-dc6d99b6-mtv24"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.529996 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-8zgk9"]
Jan 28 18:52:02 crc kubenswrapper[4767]: E0128 18:52:02.530727 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a40cf4b-e422-4f81-a8b6-6f202a809438" containerName="dnsmasq-dns"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.530759 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a40cf4b-e422-4f81-a8b6-6f202a809438" containerName="dnsmasq-dns"
Jan 28 18:52:02 crc kubenswrapper[4767]: E0128 18:52:02.530777 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b31daada-81d3-4cfa-bf1d-8d05fc34f82b" containerName="heat-cfnapi"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.530787 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b31daada-81d3-4cfa-bf1d-8d05fc34f82b" containerName="heat-cfnapi"
Jan 28 18:52:02 crc kubenswrapper[4767]: E0128 18:52:02.530809 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a40cf4b-e422-4f81-a8b6-6f202a809438" containerName="init"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.530821 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a40cf4b-e422-4f81-a8b6-6f202a809438" containerName="init"
Jan 28 18:52:02 crc kubenswrapper[4767]: E0128 18:52:02.530846 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a77e5f05-fa4b-4899-acca-36efb0710320" containerName="heat-api"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.530852 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a77e5f05-fa4b-4899-acca-36efb0710320" containerName="heat-api"
Jan 28 18:52:02 crc kubenswrapper[4767]: E0128 18:52:02.530885 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a77e5f05-fa4b-4899-acca-36efb0710320" containerName="heat-api"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.530893 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a77e5f05-fa4b-4899-acca-36efb0710320" containerName="heat-api"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.531127 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a77e5f05-fa4b-4899-acca-36efb0710320" containerName="heat-api"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.531147 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b31daada-81d3-4cfa-bf1d-8d05fc34f82b" containerName="heat-cfnapi"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.531158 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b31daada-81d3-4cfa-bf1d-8d05fc34f82b" containerName="heat-cfnapi"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.531184 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a40cf4b-e422-4f81-a8b6-6f202a809438" containerName="dnsmasq-dns"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.532172 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8zgk9"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.560651 4767 scope.go:117] "RemoveContainer" containerID="88f654d48f170056c7d6f5aa5156853d6c13e4aea8dabe452034613e52773d8c"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.564350 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-api-757c589dd9-zvtp6"]
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.584162 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4fecf89-74fc-47bd-83ff-876cc16e8dc8-operator-scripts\") pod \"nova-api-db-create-8zgk9\" (UID: \"f4fecf89-74fc-47bd-83ff-876cc16e8dc8\") " pod="openstack/nova-api-db-create-8zgk9"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.589354 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4d7bd\" (UniqueName: \"kubernetes.io/projected/f4fecf89-74fc-47bd-83ff-876cc16e8dc8-kube-api-access-4d7bd\") pod \"nova-api-db-create-8zgk9\" (UID: \"f4fecf89-74fc-47bd-83ff-876cc16e8dc8\") " pod="openstack/nova-api-db-create-8zgk9"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.593379 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-api-757c589dd9-zvtp6"]
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.616376 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-8zgk9"]
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.635382 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.694331 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l6tgm\" (UniqueName: \"kubernetes.io/projected/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-kube-api-access-l6tgm\") pod \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") "
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.694845 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-config-data\") pod \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") "
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.694911 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-sg-core-conf-yaml\") pod \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") "
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.694963 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-combined-ca-bundle\") pod \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") "
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.694992 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-run-httpd\") pod \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") "
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.695066 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-log-httpd\") pod \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") "
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.695181 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-scripts\") pod \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\" (UID: \"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff\") "
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.695989 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4d7bd\" (UniqueName: \"kubernetes.io/projected/f4fecf89-74fc-47bd-83ff-876cc16e8dc8-kube-api-access-4d7bd\") pod \"nova-api-db-create-8zgk9\" (UID: \"f4fecf89-74fc-47bd-83ff-876cc16e8dc8\") " pod="openstack/nova-api-db-create-8zgk9"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.696107 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4fecf89-74fc-47bd-83ff-876cc16e8dc8-operator-scripts\") pod \"nova-api-db-create-8zgk9\" (UID: \"f4fecf89-74fc-47bd-83ff-876cc16e8dc8\") " pod="openstack/nova-api-db-create-8zgk9"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.696137 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" (UID: "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.711428 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4fecf89-74fc-47bd-83ff-876cc16e8dc8-operator-scripts\") pod \"nova-api-db-create-8zgk9\" (UID: \"f4fecf89-74fc-47bd-83ff-876cc16e8dc8\") " pod="openstack/nova-api-db-create-8zgk9"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.713163 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" (UID: "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.725316 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-kube-api-access-l6tgm" (OuterVolumeSpecName: "kube-api-access-l6tgm") pod "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" (UID: "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff"). InnerVolumeSpecName "kube-api-access-l6tgm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.751314 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-scripts" (OuterVolumeSpecName: "scripts") pod "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" (UID: "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.769872 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4d7bd\" (UniqueName: \"kubernetes.io/projected/f4fecf89-74fc-47bd-83ff-876cc16e8dc8-kube-api-access-4d7bd\") pod \"nova-api-db-create-8zgk9\" (UID: \"f4fecf89-74fc-47bd-83ff-876cc16e8dc8\") " pod="openstack/nova-api-db-create-8zgk9"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.775922 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-5p9bh"]
Jan 28 18:52:02 crc kubenswrapper[4767]: E0128 18:52:02.777185 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="proxy-httpd"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.786623 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="proxy-httpd"
Jan 28 18:52:02 crc kubenswrapper[4767]: E0128 18:52:02.786752 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b31daada-81d3-4cfa-bf1d-8d05fc34f82b" containerName="heat-cfnapi"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.786765 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b31daada-81d3-4cfa-bf1d-8d05fc34f82b" containerName="heat-cfnapi"
Jan 28 18:52:02 crc kubenswrapper[4767]: E0128 18:52:02.786793 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="ceilometer-central-agent"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.786800 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="ceilometer-central-agent"
Jan 28 18:52:02 crc kubenswrapper[4767]: E0128 18:52:02.787027 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="ceilometer-notification-agent"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.787036 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="ceilometer-notification-agent"
Jan 28 18:52:02 crc kubenswrapper[4767]: E0128 18:52:02.787061 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="sg-core"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.787067 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="sg-core"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.787770 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="ceilometer-notification-agent"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.787794 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="sg-core"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.787812 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="ceilometer-central-agent"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.787850 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" containerName="proxy-httpd"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.787868 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a77e5f05-fa4b-4899-acca-36efb0710320" containerName="heat-api"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.800090 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-5p9bh"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.805431 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" (UID: "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.812619 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.812672 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.812684 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.812699 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.812713 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l6tgm\" (UniqueName: \"kubernetes.io/projected/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-kube-api-access-l6tgm\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.832155 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a77e5f05-fa4b-4899-acca-36efb0710320" path="/var/lib/kubelet/pods/a77e5f05-fa4b-4899-acca-36efb0710320/volumes"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.872105 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" (UID: "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.917331 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1cdbe43-1ad1-46dd-9349-b7322d4068a0-operator-scripts\") pod \"nova-cell0-db-create-5p9bh\" (UID: \"b1cdbe43-1ad1-46dd-9349-b7322d4068a0\") " pod="openstack/nova-cell0-db-create-5p9bh"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.918214 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkvj6\" (UniqueName: \"kubernetes.io/projected/b1cdbe43-1ad1-46dd-9349-b7322d4068a0-kube-api-access-gkvj6\") pod \"nova-cell0-db-create-5p9bh\" (UID: \"b1cdbe43-1ad1-46dd-9349-b7322d4068a0\") " pod="openstack/nova-cell0-db-create-5p9bh"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.918883 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.932854 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-5p9bh"]
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.932906 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-dc6d99b6-mtv24"]
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.932921 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-dc6d99b6-mtv24"]
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.932938 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-f207-account-create-update-4rckz"]
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.939562 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8zgk9"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.941643 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f207-account-create-update-4rckz"
Jan 28 18:52:02 crc kubenswrapper[4767]: I0128 18:52:02.957730 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.001334 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f207-account-create-update-4rckz"]
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.021598 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-config-data" (OuterVolumeSpecName: "config-data") pod "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" (UID: "4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.022942 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-pfccx"]
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.024914 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-pfccx"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.036989 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c-operator-scripts\") pod \"nova-api-f207-account-create-update-4rckz\" (UID: \"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c\") " pod="openstack/nova-api-f207-account-create-update-4rckz"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.037797 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1cdbe43-1ad1-46dd-9349-b7322d4068a0-operator-scripts\") pod \"nova-cell0-db-create-5p9bh\" (UID: \"b1cdbe43-1ad1-46dd-9349-b7322d4068a0\") " pod="openstack/nova-cell0-db-create-5p9bh"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.037916 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkvj6\" (UniqueName: \"kubernetes.io/projected/b1cdbe43-1ad1-46dd-9349-b7322d4068a0-kube-api-access-gkvj6\") pod \"nova-cell0-db-create-5p9bh\" (UID: \"b1cdbe43-1ad1-46dd-9349-b7322d4068a0\") " pod="openstack/nova-cell0-db-create-5p9bh"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.037961 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29sxm\" (UniqueName: \"kubernetes.io/projected/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c-kube-api-access-29sxm\") pod \"nova-api-f207-account-create-update-4rckz\" (UID: \"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c\") " pod="openstack/nova-api-f207-account-create-update-4rckz"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.038045 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.038836 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1cdbe43-1ad1-46dd-9349-b7322d4068a0-operator-scripts\") pod \"nova-cell0-db-create-5p9bh\" (UID: \"b1cdbe43-1ad1-46dd-9349-b7322d4068a0\") " pod="openstack/nova-cell0-db-create-5p9bh"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.051519 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-pfccx"]
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.062770 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-2cb5-account-create-update-tccrf"]
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.064692 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2cb5-account-create-update-tccrf"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.067042 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkvj6\" (UniqueName: \"kubernetes.io/projected/b1cdbe43-1ad1-46dd-9349-b7322d4068a0-kube-api-access-gkvj6\") pod \"nova-cell0-db-create-5p9bh\" (UID: \"b1cdbe43-1ad1-46dd-9349-b7322d4068a0\") " pod="openstack/nova-cell0-db-create-5p9bh"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.069779 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.076448 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-2cb5-account-create-update-tccrf"]
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.143982 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a673b46b-2808-4772-963d-7f2ae90be2fe-operator-scripts\") pod \"nova-cell0-2cb5-account-create-update-tccrf\" (UID: \"a673b46b-2808-4772-963d-7f2ae90be2fe\") " pod="openstack/nova-cell0-2cb5-account-create-update-tccrf"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.144608 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cm7sk\" (UniqueName: \"kubernetes.io/projected/a673b46b-2808-4772-963d-7f2ae90be2fe-kube-api-access-cm7sk\") pod \"nova-cell0-2cb5-account-create-update-tccrf\" (UID: \"a673b46b-2808-4772-963d-7f2ae90be2fe\") " pod="openstack/nova-cell0-2cb5-account-create-update-tccrf"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.144711 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29sxm\" (UniqueName: \"kubernetes.io/projected/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c-kube-api-access-29sxm\") pod \"nova-api-f207-account-create-update-4rckz\" (UID: \"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c\") " pod="openstack/nova-api-f207-account-create-update-4rckz"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.144822 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c-operator-scripts\") pod \"nova-api-f207-account-create-update-4rckz\" (UID: \"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c\") " pod="openstack/nova-api-f207-account-create-update-4rckz"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.144866 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e93b8328-4f9c-47da-8af8-c86f5acf443b-operator-scripts\") pod \"nova-cell1-db-create-pfccx\" (UID: \"e93b8328-4f9c-47da-8af8-c86f5acf443b\") " pod="openstack/nova-cell1-db-create-pfccx"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.144926 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rmxms\" (UniqueName: \"kubernetes.io/projected/e93b8328-4f9c-47da-8af8-c86f5acf443b-kube-api-access-rmxms\") pod \"nova-cell1-db-create-pfccx\" (UID: \"e93b8328-4f9c-47da-8af8-c86f5acf443b\") " pod="openstack/nova-cell1-db-create-pfccx"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.146479 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c-operator-scripts\") pod \"nova-api-f207-account-create-update-4rckz\" (UID: \"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c\") " pod="openstack/nova-api-f207-account-create-update-4rckz"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.154815 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-6d16-account-create-update-4r74q"]
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.156768 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-6d16-account-create-update-4r74q"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.168659 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.170090 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-6d16-account-create-update-4r74q"]
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.174007 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29sxm\" (UniqueName: \"kubernetes.io/projected/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c-kube-api-access-29sxm\") pod \"nova-api-f207-account-create-update-4rckz\" (UID: \"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c\") " pod="openstack/nova-api-f207-account-create-update-4rckz"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.249052 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10bc51c5-1acb-4d5f-9fde-fe1b17388f51-operator-scripts\") pod \"nova-cell1-6d16-account-create-update-4r74q\" (UID: \"10bc51c5-1acb-4d5f-9fde-fe1b17388f51\") " pod="openstack/nova-cell1-6d16-account-create-update-4r74q"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.249236 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e93b8328-4f9c-47da-8af8-c86f5acf443b-operator-scripts\") pod \"nova-cell1-db-create-pfccx\" (UID: \"e93b8328-4f9c-47da-8af8-c86f5acf443b\") " pod="openstack/nova-cell1-db-create-pfccx"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.249309 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rmxms\" (UniqueName: \"kubernetes.io/projected/e93b8328-4f9c-47da-8af8-c86f5acf443b-kube-api-access-rmxms\") pod \"nova-cell1-db-create-pfccx\" (UID: \"e93b8328-4f9c-47da-8af8-c86f5acf443b\") " pod="openstack/nova-cell1-db-create-pfccx"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.249417 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a673b46b-2808-4772-963d-7f2ae90be2fe-operator-scripts\") pod \"nova-cell0-2cb5-account-create-update-tccrf\" (UID: \"a673b46b-2808-4772-963d-7f2ae90be2fe\") " pod="openstack/nova-cell0-2cb5-account-create-update-tccrf"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.249454 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8ckq\" (UniqueName: \"kubernetes.io/projected/10bc51c5-1acb-4d5f-9fde-fe1b17388f51-kube-api-access-f8ckq\") pod \"nova-cell1-6d16-account-create-update-4r74q\" (UID: \"10bc51c5-1acb-4d5f-9fde-fe1b17388f51\") " pod="openstack/nova-cell1-6d16-account-create-update-4r74q"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.249491 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cm7sk\" (UniqueName: \"kubernetes.io/projected/a673b46b-2808-4772-963d-7f2ae90be2fe-kube-api-access-cm7sk\") pod \"nova-cell0-2cb5-account-create-update-tccrf\" (UID: \"a673b46b-2808-4772-963d-7f2ae90be2fe\") " pod="openstack/nova-cell0-2cb5-account-create-update-tccrf"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.250758 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e93b8328-4f9c-47da-8af8-c86f5acf443b-operator-scripts\") pod \"nova-cell1-db-create-pfccx\" (UID: \"e93b8328-4f9c-47da-8af8-c86f5acf443b\") " pod="openstack/nova-cell1-db-create-pfccx"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.251266 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a673b46b-2808-4772-963d-7f2ae90be2fe-operator-scripts\") pod \"nova-cell0-2cb5-account-create-update-tccrf\" (UID: \"a673b46b-2808-4772-963d-7f2ae90be2fe\") " pod="openstack/nova-cell0-2cb5-account-create-update-tccrf"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.270532 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-5p9bh"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.277191 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rmxms\" (UniqueName: \"kubernetes.io/projected/e93b8328-4f9c-47da-8af8-c86f5acf443b-kube-api-access-rmxms\") pod \"nova-cell1-db-create-pfccx\" (UID: \"e93b8328-4f9c-47da-8af8-c86f5acf443b\") " pod="openstack/nova-cell1-db-create-pfccx"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.283236 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cm7sk\" (UniqueName: \"kubernetes.io/projected/a673b46b-2808-4772-963d-7f2ae90be2fe-kube-api-access-cm7sk\") pod \"nova-cell0-2cb5-account-create-update-tccrf\" (UID: \"a673b46b-2808-4772-963d-7f2ae90be2fe\") " pod="openstack/nova-cell0-2cb5-account-create-update-tccrf"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.289588 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f207-account-create-update-4rckz"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.351800 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10bc51c5-1acb-4d5f-9fde-fe1b17388f51-operator-scripts\") pod \"nova-cell1-6d16-account-create-update-4r74q\" (UID: \"10bc51c5-1acb-4d5f-9fde-fe1b17388f51\") " pod="openstack/nova-cell1-6d16-account-create-update-4r74q"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.352074 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8ckq\" (UniqueName: \"kubernetes.io/projected/10bc51c5-1acb-4d5f-9fde-fe1b17388f51-kube-api-access-f8ckq\") pod \"nova-cell1-6d16-account-create-update-4r74q\" (UID: \"10bc51c5-1acb-4d5f-9fde-fe1b17388f51\") " pod="openstack/nova-cell1-6d16-account-create-update-4r74q"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.361361 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-pfccx"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.362464 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10bc51c5-1acb-4d5f-9fde-fe1b17388f51-operator-scripts\") pod \"nova-cell1-6d16-account-create-update-4r74q\" (UID: \"10bc51c5-1acb-4d5f-9fde-fe1b17388f51\") " pod="openstack/nova-cell1-6d16-account-create-update-4r74q"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.384514 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2cb5-account-create-update-tccrf"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.386292 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8ckq\" (UniqueName: \"kubernetes.io/projected/10bc51c5-1acb-4d5f-9fde-fe1b17388f51-kube-api-access-f8ckq\") pod \"nova-cell1-6d16-account-create-update-4r74q\" (UID: \"10bc51c5-1acb-4d5f-9fde-fe1b17388f51\") " pod="openstack/nova-cell1-6d16-account-create-update-4r74q"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.526543 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-6d16-account-create-update-4r74q"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.584393 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-8zgk9"]
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.627434 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff","Type":"ContainerDied","Data":"b5d699a5730055140918b2b7d7d675dbc0f995d6eed898e49438d77aa0ed14fa"}
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.627506 4767 scope.go:117] "RemoveContainer" containerID="6203742ef0dba2555eaff2ba006939523891961f19cc050f441458495060bfd1"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.627607 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 18:52:03 crc kubenswrapper[4767]: E0128 18:52:03.627683 4767 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Jan 28 18:52:03 crc kubenswrapper[4767]: E0128 18:52:03.650942 4767 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Jan 28 18:52:03 crc kubenswrapper[4767]: E0128 18:52:03.668510 4767 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4" cmd=["/usr/bin/pgrep","-r","DRST","heat-engine"]
Jan 28 18:52:03 crc kubenswrapper[4767]: E0128 18:52:03.668623 4767 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/heat-engine-59d5689489-z5swl" podUID="33e5dd4b-468d-4a50-9429-1faba885d020" containerName="heat-engine"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.702615 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.720480 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.732311 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.735573 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.737969 4767 scope.go:117] "RemoveContainer" containerID="bde4fa2000377a13dce2f240f6780ceadad58e8b800b36a2ae256d2acf47b7f2"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.762978 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.770510 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.771217 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.864699 4767 scope.go:117] "RemoveContainer" containerID="52e4e9bfdd70ecb2807469e043b7c4b79cbcc838f81fff3ef3f3c6bbff2227d3"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.900844 4767 scope.go:117] "RemoveContainer" containerID="2e89e42108b5d94bbe9a6e5655bf4d3defed609495f59d702d3d6454336b75cf"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.921879 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-config-data\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.921967 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17b6b71a-81a9-4343-b582-0c75aeb65bb1-log-httpd\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.922014 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-scripts\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.922038 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.922110 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.922475 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vz9rb\" (UniqueName: \"kubernetes.io/projected/17b6b71a-81a9-4343-b582-0c75aeb65bb1-kube-api-access-vz9rb\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:03 crc kubenswrapper[4767]: I0128 18:52:03.922502 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17b6b71a-81a9-4343-b582-0c75aeb65bb1-run-httpd\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.022484 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-f207-account-create-update-4rckz"]
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.028420 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vz9rb\" (UniqueName: \"kubernetes.io/projected/17b6b71a-81a9-4343-b582-0c75aeb65bb1-kube-api-access-vz9rb\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.028501 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17b6b71a-81a9-4343-b582-0c75aeb65bb1-run-httpd\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.028565 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-config-data\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.028615 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17b6b71a-81a9-4343-b582-0c75aeb65bb1-log-httpd\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.028663 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-scripts\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.028689 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.028773 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.038362 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17b6b71a-81a9-4343-b582-0c75aeb65bb1-log-httpd\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.053309 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-5p9bh"]
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.059302 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17b6b71a-81a9-4343-b582-0c75aeb65bb1-run-httpd\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.088365 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.088696 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-config-data\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.089017 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-scripts\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.089188 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.096776 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vz9rb\" (UniqueName: \"kubernetes.io/projected/17b6b71a-81a9-4343-b582-0c75aeb65bb1-kube-api-access-vz9rb\") pod \"ceilometer-0\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.152379 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.319577 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-2cb5-account-create-update-tccrf"]
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.409549 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-pfccx"]
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.505467 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-6d16-account-create-update-4r74q"]
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.915921 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff" path="/var/lib/kubelet/pods/4df50f7c-13d4-47f6-99e9-2f13ecf4a9ff/volumes"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.929631 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b31daada-81d3-4cfa-bf1d-8d05fc34f82b" path="/var/lib/kubelet/pods/b31daada-81d3-4cfa-bf1d-8d05fc34f82b/volumes"
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.930531 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2cb5-account-create-update-tccrf" event={"ID":"a673b46b-2808-4772-963d-7f2ae90be2fe","Type":"ContainerStarted","Data":"f2cfc3049c0f54fdf6fc4b3aecc809f5387bb2069a8f63b05b19e229c256ed47"}
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.930566 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-6d16-account-create-update-4r74q" event={"ID":"10bc51c5-1acb-4d5f-9fde-fe1b17388f51","Type":"ContainerStarted","Data":"9cf369c13216f2da54c9d9d7d600ceb3f6878173cd14ee32a6ac68c31acaead8"}
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.930584 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.930618 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8zgk9" event={"ID":"f4fecf89-74fc-47bd-83ff-876cc16e8dc8","Type":"ContainerStarted","Data":"228d3a5fb5a3dedcc6f264527a158ac4d4054c837492113ee1e0e32f3da46807"}
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.930638 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8zgk9" event={"ID":"f4fecf89-74fc-47bd-83ff-876cc16e8dc8","Type":"ContainerStarted","Data":"9f4cd0334840c3e913395f58d26ff87b9b3cc16fe89a03e06a03329c2eb67734"}
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.930947 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="46edc543-4bb1-408d-babc-b542091bafa8" containerName="glance-log" containerID="cri-o://7a4e13758cf6c450608437b107663b30e9e6bf86e3f86eaf412130a7117966e2" gracePeriod=30
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.931185 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="46edc543-4bb1-408d-babc-b542091bafa8" containerName="glance-httpd" containerID="cri-o://48d6b4934cfcf5fb0462346dd6a9cb82e588d8d558c9f955d23dc8d2654d1c16" gracePeriod=30
Jan 28 18:52:04 crc kubenswrapper[4767]: I0128 18:52:04.949449 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pfccx" event={"ID":"e93b8328-4f9c-47da-8af8-c86f5acf443b","Type":"ContainerStarted","Data":"c9e8d6cb1353b22510d1d17c3512f3bc875b3d570be40022aa8144b72df3a6d4"}
Jan 28 18:52:05 crc kubenswrapper[4767]: I0128 18:52:05.004157 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f207-account-create-update-4rckz" event={"ID":"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c","Type":"ContainerStarted","Data":"882aba13f2dc74102cf6584d8992951581ad8824e1f53d388ba198c7d3e4a67e"} Jan 28 18:52:05 crc kubenswrapper[4767]: I0128 18:52:05.033314 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:05 crc kubenswrapper[4767]: I0128 18:52:05.051724 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-5p9bh" event={"ID":"b1cdbe43-1ad1-46dd-9349-b7322d4068a0","Type":"ContainerStarted","Data":"632c85cbc9390b5df54b24dcecefaf5a2fe3b49b8a48bf0b959ca40ca7a53a9e"} Jan 28 18:52:05 crc kubenswrapper[4767]: I0128 18:52:05.124351 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-5p9bh" podStartSLOduration=3.124323504 podStartE2EDuration="3.124323504s" podCreationTimestamp="2026-01-28 18:52:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:52:05.093895274 +0000 UTC m=+1331.058078158" watchObservedRunningTime="2026-01-28 18:52:05.124323504 +0000 UTC m=+1331.088506378" Jan 28 18:52:05 crc kubenswrapper[4767]: I0128 18:52:05.185914 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-8zgk9" podStartSLOduration=3.185888527 podStartE2EDuration="3.185888527s" podCreationTimestamp="2026-01-28 18:52:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:52:05.118417751 +0000 UTC m=+1331.082600635" watchObservedRunningTime="2026-01-28 18:52:05.185888527 +0000 UTC m=+1331.150071401" Jan 28 18:52:05 crc kubenswrapper[4767]: I0128 18:52:05.204259 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-f207-account-create-update-4rckz" podStartSLOduration=3.20421494 podStartE2EDuration="3.20421494s" podCreationTimestamp="2026-01-28 18:52:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:52:05.152813204 +0000 UTC m=+1331.116996098" watchObservedRunningTime="2026-01-28 18:52:05.20421494 +0000 UTC m=+1331.168397814" Jan 28 18:52:05 crc kubenswrapper[4767]: I0128 18:52:05.949874 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.018182 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-config-data\") pod \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.018301 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-public-tls-certs\") pod \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.018344 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-combined-ca-bundle\") pod \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.018452 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dd0d0af-4059-48d5-9251-34c7f04df1c3-logs\") pod \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.018648 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-scripts\") pod \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.018772 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.018844 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j6p2v\" (UniqueName: \"kubernetes.io/projected/1dd0d0af-4059-48d5-9251-34c7f04df1c3-kube-api-access-j6p2v\") pod \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.018882 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1dd0d0af-4059-48d5-9251-34c7f04df1c3-httpd-run\") pod \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\" (UID: \"1dd0d0af-4059-48d5-9251-34c7f04df1c3\") " Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.019910 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1dd0d0af-4059-48d5-9251-34c7f04df1c3-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "1dd0d0af-4059-48d5-9251-34c7f04df1c3" (UID: "1dd0d0af-4059-48d5-9251-34c7f04df1c3"). InnerVolumeSpecName "httpd-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.024015 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1dd0d0af-4059-48d5-9251-34c7f04df1c3-logs" (OuterVolumeSpecName: "logs") pod "1dd0d0af-4059-48d5-9251-34c7f04df1c3" (UID: "1dd0d0af-4059-48d5-9251-34c7f04df1c3"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.038372 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1dd0d0af-4059-48d5-9251-34c7f04df1c3-kube-api-access-j6p2v" (OuterVolumeSpecName: "kube-api-access-j6p2v") pod "1dd0d0af-4059-48d5-9251-34c7f04df1c3" (UID: "1dd0d0af-4059-48d5-9251-34c7f04df1c3"). InnerVolumeSpecName "kube-api-access-j6p2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.052692 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-scripts" (OuterVolumeSpecName: "scripts") pod "1dd0d0af-4059-48d5-9251-34c7f04df1c3" (UID: "1dd0d0af-4059-48d5-9251-34c7f04df1c3"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.056257 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "1dd0d0af-4059-48d5-9251-34c7f04df1c3" (UID: "1dd0d0af-4059-48d5-9251-34c7f04df1c3"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.160891 4767 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.161496 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j6p2v\" (UniqueName: \"kubernetes.io/projected/1dd0d0af-4059-48d5-9251-34c7f04df1c3-kube-api-access-j6p2v\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.161518 4767 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/1dd0d0af-4059-48d5-9251-34c7f04df1c3-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.161542 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1dd0d0af-4059-48d5-9251-34c7f04df1c3-logs\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.161557 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.251392 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-6d16-account-create-update-4r74q" event={"ID":"10bc51c5-1acb-4d5f-9fde-fe1b17388f51","Type":"ContainerStarted","Data":"192deb188367593ab30c3ab7eda6ea984473c509f676a8d58e0fef9ab2a99778"} Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.264537 4767 generic.go:334] "Generic (PLEG): container finished" podID="f4fecf89-74fc-47bd-83ff-876cc16e8dc8" 
containerID="228d3a5fb5a3dedcc6f264527a158ac4d4054c837492113ee1e0e32f3da46807" exitCode=0 Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.264810 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8zgk9" event={"ID":"f4fecf89-74fc-47bd-83ff-876cc16e8dc8","Type":"ContainerDied","Data":"228d3a5fb5a3dedcc6f264527a158ac4d4054c837492113ee1e0e32f3da46807"} Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.285038 4767 generic.go:334] "Generic (PLEG): container finished" podID="46edc543-4bb1-408d-babc-b542091bafa8" containerID="7a4e13758cf6c450608437b107663b30e9e6bf86e3f86eaf412130a7117966e2" exitCode=143 Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.285164 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"46edc543-4bb1-408d-babc-b542091bafa8","Type":"ContainerDied","Data":"7a4e13758cf6c450608437b107663b30e9e6bf86e3f86eaf412130a7117966e2"} Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.315029 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.316013 4767 generic.go:334] "Generic (PLEG): container finished" podID="1dd0d0af-4059-48d5-9251-34c7f04df1c3" containerID="a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2" exitCode=0 Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.316081 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1dd0d0af-4059-48d5-9251-34c7f04df1c3","Type":"ContainerDied","Data":"a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2"} Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.316118 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"1dd0d0af-4059-48d5-9251-34c7f04df1c3","Type":"ContainerDied","Data":"ea1dd9945a43d228bd7fa0ebdd88e8b14271a8935d10992430f3f8d7243dbb69"} Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.316148 4767 scope.go:117] "RemoveContainer" containerID="a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.320217 4767 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.327041 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-6d16-account-create-update-4r74q" podStartSLOduration=3.327011844 podStartE2EDuration="3.327011844s" podCreationTimestamp="2026-01-28 18:52:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:52:06.294063395 +0000 UTC m=+1332.258246269" watchObservedRunningTime="2026-01-28 18:52:06.327011844 +0000 UTC m=+1332.291194718" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.339195 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pfccx" event={"ID":"e93b8328-4f9c-47da-8af8-c86f5acf443b","Type":"ContainerStarted","Data":"277b8c35a4f17e6299df17af71aedd63abd30e506e1d19b236ea96676bdbf6d4"} Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.357624 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "1dd0d0af-4059-48d5-9251-34c7f04df1c3" (UID: "1dd0d0af-4059-48d5-9251-34c7f04df1c3"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.376540 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f207-account-create-update-4rckz" event={"ID":"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c","Type":"ContainerStarted","Data":"2107bb8cf5993b9b2b5f431cdd50b5a489a1330c563c354cd74d146d1dc60585"} Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.387084 4767 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.389530 4767 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.417119 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-config-data" (OuterVolumeSpecName: "config-data") pod "1dd0d0af-4059-48d5-9251-34c7f04df1c3" (UID: "1dd0d0af-4059-48d5-9251-34c7f04df1c3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.420016 4767 scope.go:117] "RemoveContainer" containerID="bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.423071 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1dd0d0af-4059-48d5-9251-34c7f04df1c3" (UID: "1dd0d0af-4059-48d5-9251-34c7f04df1c3"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.429985 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2cb5-account-create-update-tccrf" event={"ID":"a673b46b-2808-4772-963d-7f2ae90be2fe","Type":"ContainerStarted","Data":"2ed2838a73d2aa52ba7b4d23ad3d4b5a43fc236a521102dd5e2365fefd270f79"} Jan 28 18:52:06 crc kubenswrapper[4767]: E0128 18:52:06.431609 4767 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf4fecf89_74fc_47bd_83ff_876cc16e8dc8.slice/crio-conmon-228d3a5fb5a3dedcc6f264527a158ac4d4054c837492113ee1e0e32f3da46807.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode93b8328_4f9c_47da_8af8_c86f5acf443b.slice/crio-277b8c35a4f17e6299df17af71aedd63abd30e506e1d19b236ea96676bdbf6d4.scope\": RecentStats: unable to find data in memory cache]" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.440487 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-5p9bh" event={"ID":"b1cdbe43-1ad1-46dd-9349-b7322d4068a0","Type":"ContainerStarted","Data":"d4a63588f30cf808b3b7a706a1f6f6047a2f8b069244fec9d3362944c8e19a66"} Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.443553 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17b6b71a-81a9-4343-b582-0c75aeb65bb1","Type":"ContainerStarted","Data":"e9ba3c97638b8569e120a474eb876b69ae82c2efd1604dc5b826a9d848810148"} Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.466516 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-2cb5-account-create-update-tccrf" podStartSLOduration=4.46648686 podStartE2EDuration="4.46648686s" podCreationTimestamp="2026-01-28 18:52:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:52:06.461458833 +0000 UTC m=+1332.425641737" watchObservedRunningTime="2026-01-28 18:52:06.46648686 +0000 UTC m=+1332.430669844" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.492705 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.492781 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1dd0d0af-4059-48d5-9251-34c7f04df1c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.544860 4767 scope.go:117] "RemoveContainer" containerID="a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2" Jan 28 18:52:06 crc kubenswrapper[4767]: E0128 18:52:06.545940 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2\": container with ID starting with a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2 not found: ID does not exist" containerID="a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.545994 4767 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2"} err="failed to get container status \"a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2\": rpc error: code = NotFound desc = could not find container \"a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2\": container with ID starting with a86518b42cc5ad060a9addd218c445a4d18bac75526d215726b8e02e7a1994e2 not found: ID does not exist" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.546034 4767 scope.go:117] "RemoveContainer" containerID="bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45" Jan 28 18:52:06 crc kubenswrapper[4767]: E0128 18:52:06.547121 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45\": container with ID starting with bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45 not found: ID does not exist" containerID="bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.547159 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45"} err="failed to get container status \"bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45\": rpc error: code = NotFound desc = could not find container \"bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45\": container with ID starting with bb3acc63d60e8b65f2a655972cfcaf2edf92277310a28cb7f177bf0ced92ed45 not found: ID does not exist" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.671338 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.689543 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.709635 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:52:06 crc kubenswrapper[4767]: E0128 18:52:06.710457 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dd0d0af-4059-48d5-9251-34c7f04df1c3" containerName="glance-log" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.710529 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dd0d0af-4059-48d5-9251-34c7f04df1c3" containerName="glance-log" Jan 28 18:52:06 crc kubenswrapper[4767]: E0128 18:52:06.710546 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1dd0d0af-4059-48d5-9251-34c7f04df1c3" containerName="glance-httpd" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.710553 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1dd0d0af-4059-48d5-9251-34c7f04df1c3" containerName="glance-httpd" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.710854 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dd0d0af-4059-48d5-9251-34c7f04df1c3" containerName="glance-log" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.710876 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="1dd0d0af-4059-48d5-9251-34c7f04df1c3" containerName="glance-httpd" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.712659 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.717102 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.720142 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.731902 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.814914 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1dd0d0af-4059-48d5-9251-34c7f04df1c3" path="/var/lib/kubelet/pods/1dd0d0af-4059-48d5-9251-34c7f04df1c3/volumes" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.905936 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8ec04a5-bc34-4006-b761-97437b8e5687-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.906000 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8ec04a5-bc34-4006-b761-97437b8e5687-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.906075 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8ec04a5-bc34-4006-b761-97437b8e5687-scripts\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.906104 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8ec04a5-bc34-4006-b761-97437b8e5687-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.906149 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.906178 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khqwt\" (UniqueName: \"kubernetes.io/projected/a8ec04a5-bc34-4006-b761-97437b8e5687-kube-api-access-khqwt\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.906228 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8ec04a5-bc34-4006-b761-97437b8e5687-logs\") pod 
\"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:06 crc kubenswrapper[4767]: I0128 18:52:06.906310 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8ec04a5-bc34-4006-b761-97437b8e5687-config-data\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.008841 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8ec04a5-bc34-4006-b761-97437b8e5687-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.008918 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8ec04a5-bc34-4006-b761-97437b8e5687-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.009069 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8ec04a5-bc34-4006-b761-97437b8e5687-scripts\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.009101 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8ec04a5-bc34-4006-b761-97437b8e5687-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.009155 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.009183 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khqwt\" (UniqueName: \"kubernetes.io/projected/a8ec04a5-bc34-4006-b761-97437b8e5687-kube-api-access-khqwt\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.009223 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8ec04a5-bc34-4006-b761-97437b8e5687-logs\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.009277 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8ec04a5-bc34-4006-b761-97437b8e5687-config-data\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " 
pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.009950 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.011552 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/a8ec04a5-bc34-4006-b761-97437b8e5687-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.015749 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a8ec04a5-bc34-4006-b761-97437b8e5687-logs\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.020459 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a8ec04a5-bc34-4006-b761-97437b8e5687-scripts\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.021414 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a8ec04a5-bc34-4006-b761-97437b8e5687-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.022507 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a8ec04a5-bc34-4006-b761-97437b8e5687-config-data\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.034375 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a8ec04a5-bc34-4006-b761-97437b8e5687-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.041277 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khqwt\" (UniqueName: \"kubernetes.io/projected/a8ec04a5-bc34-4006-b761-97437b8e5687-kube-api-access-khqwt\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.054528 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-external-api-0\" (UID: \"a8ec04a5-bc34-4006-b761-97437b8e5687\") " pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.340889 4767 util.go:30] "No sandbox for 
pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.466655 4767 generic.go:334] "Generic (PLEG): container finished" podID="e93b8328-4f9c-47da-8af8-c86f5acf443b" containerID="277b8c35a4f17e6299df17af71aedd63abd30e506e1d19b236ea96676bdbf6d4" exitCode=0 Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.466734 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pfccx" event={"ID":"e93b8328-4f9c-47da-8af8-c86f5acf443b","Type":"ContainerDied","Data":"277b8c35a4f17e6299df17af71aedd63abd30e506e1d19b236ea96676bdbf6d4"} Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.477971 4767 generic.go:334] "Generic (PLEG): container finished" podID="c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c" containerID="2107bb8cf5993b9b2b5f431cdd50b5a489a1330c563c354cd74d146d1dc60585" exitCode=0 Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.478049 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f207-account-create-update-4rckz" event={"ID":"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c","Type":"ContainerDied","Data":"2107bb8cf5993b9b2b5f431cdd50b5a489a1330c563c354cd74d146d1dc60585"} Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.485761 4767 generic.go:334] "Generic (PLEG): container finished" podID="b1cdbe43-1ad1-46dd-9349-b7322d4068a0" containerID="d4a63588f30cf808b3b7a706a1f6f6047a2f8b069244fec9d3362944c8e19a66" exitCode=0 Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.485906 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-5p9bh" event={"ID":"b1cdbe43-1ad1-46dd-9349-b7322d4068a0","Type":"ContainerDied","Data":"d4a63588f30cf808b3b7a706a1f6f6047a2f8b069244fec9d3362944c8e19a66"} Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.494183 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17b6b71a-81a9-4343-b582-0c75aeb65bb1","Type":"ContainerStarted","Data":"907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97"} Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.502031 4767 generic.go:334] "Generic (PLEG): container finished" podID="a673b46b-2808-4772-963d-7f2ae90be2fe" containerID="2ed2838a73d2aa52ba7b4d23ad3d4b5a43fc236a521102dd5e2365fefd270f79" exitCode=0 Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.502147 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2cb5-account-create-update-tccrf" event={"ID":"a673b46b-2808-4772-963d-7f2ae90be2fe","Type":"ContainerDied","Data":"2ed2838a73d2aa52ba7b4d23ad3d4b5a43fc236a521102dd5e2365fefd270f79"} Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.522913 4767 generic.go:334] "Generic (PLEG): container finished" podID="10bc51c5-1acb-4d5f-9fde-fe1b17388f51" containerID="192deb188367593ab30c3ab7eda6ea984473c509f676a8d58e0fef9ab2a99778" exitCode=0 Jan 28 18:52:07 crc kubenswrapper[4767]: I0128 18:52:07.523350 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-6d16-account-create-update-4r74q" event={"ID":"10bc51c5-1acb-4d5f-9fde-fe1b17388f51","Type":"ContainerDied","Data":"192deb188367593ab30c3ab7eda6ea984473c509f676a8d58e0fef9ab2a99778"} Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.262567 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-pfccx" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.379497 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e93b8328-4f9c-47da-8af8-c86f5acf443b-operator-scripts\") pod \"e93b8328-4f9c-47da-8af8-c86f5acf443b\" (UID: \"e93b8328-4f9c-47da-8af8-c86f5acf443b\") " Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.379634 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rmxms\" (UniqueName: \"kubernetes.io/projected/e93b8328-4f9c-47da-8af8-c86f5acf443b-kube-api-access-rmxms\") pod \"e93b8328-4f9c-47da-8af8-c86f5acf443b\" (UID: \"e93b8328-4f9c-47da-8af8-c86f5acf443b\") " Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.381519 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e93b8328-4f9c-47da-8af8-c86f5acf443b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e93b8328-4f9c-47da-8af8-c86f5acf443b" (UID: "e93b8328-4f9c-47da-8af8-c86f5acf443b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.389578 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e93b8328-4f9c-47da-8af8-c86f5acf443b-kube-api-access-rmxms" (OuterVolumeSpecName: "kube-api-access-rmxms") pod "e93b8328-4f9c-47da-8af8-c86f5acf443b" (UID: "e93b8328-4f9c-47da-8af8-c86f5acf443b"). InnerVolumeSpecName "kube-api-access-rmxms". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.446960 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8zgk9" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.469074 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-5p9bh" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.485316 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4fecf89-74fc-47bd-83ff-876cc16e8dc8-operator-scripts\") pod \"f4fecf89-74fc-47bd-83ff-876cc16e8dc8\" (UID: \"f4fecf89-74fc-47bd-83ff-876cc16e8dc8\") " Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.485679 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkvj6\" (UniqueName: \"kubernetes.io/projected/b1cdbe43-1ad1-46dd-9349-b7322d4068a0-kube-api-access-gkvj6\") pod \"b1cdbe43-1ad1-46dd-9349-b7322d4068a0\" (UID: \"b1cdbe43-1ad1-46dd-9349-b7322d4068a0\") " Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.485734 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d7bd\" (UniqueName: \"kubernetes.io/projected/f4fecf89-74fc-47bd-83ff-876cc16e8dc8-kube-api-access-4d7bd\") pod \"f4fecf89-74fc-47bd-83ff-876cc16e8dc8\" (UID: \"f4fecf89-74fc-47bd-83ff-876cc16e8dc8\") " Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.485843 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1cdbe43-1ad1-46dd-9349-b7322d4068a0-operator-scripts\") pod \"b1cdbe43-1ad1-46dd-9349-b7322d4068a0\" (UID: \"b1cdbe43-1ad1-46dd-9349-b7322d4068a0\") " Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.488785 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4fecf89-74fc-47bd-83ff-876cc16e8dc8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f4fecf89-74fc-47bd-83ff-876cc16e8dc8" (UID: "f4fecf89-74fc-47bd-83ff-876cc16e8dc8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.489450 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b1cdbe43-1ad1-46dd-9349-b7322d4068a0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b1cdbe43-1ad1-46dd-9349-b7322d4068a0" (UID: "b1cdbe43-1ad1-46dd-9349-b7322d4068a0"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.490596 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e93b8328-4f9c-47da-8af8-c86f5acf443b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.490622 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f4fecf89-74fc-47bd-83ff-876cc16e8dc8-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.490639 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rmxms\" (UniqueName: \"kubernetes.io/projected/e93b8328-4f9c-47da-8af8-c86f5acf443b-kube-api-access-rmxms\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.490655 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b1cdbe43-1ad1-46dd-9349-b7322d4068a0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.495076 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b1cdbe43-1ad1-46dd-9349-b7322d4068a0-kube-api-access-gkvj6" (OuterVolumeSpecName: "kube-api-access-gkvj6") pod "b1cdbe43-1ad1-46dd-9349-b7322d4068a0" (UID: "b1cdbe43-1ad1-46dd-9349-b7322d4068a0"). InnerVolumeSpecName "kube-api-access-gkvj6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.501354 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4fecf89-74fc-47bd-83ff-876cc16e8dc8-kube-api-access-4d7bd" (OuterVolumeSpecName: "kube-api-access-4d7bd") pod "f4fecf89-74fc-47bd-83ff-876cc16e8dc8" (UID: "f4fecf89-74fc-47bd-83ff-876cc16e8dc8"). InnerVolumeSpecName "kube-api-access-4d7bd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.525931 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-engine-59d5689489-z5swl" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.546705 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-8zgk9" event={"ID":"f4fecf89-74fc-47bd-83ff-876cc16e8dc8","Type":"ContainerDied","Data":"9f4cd0334840c3e913395f58d26ff87b9b3cc16fe89a03e06a03329c2eb67734"} Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.546770 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9f4cd0334840c3e913395f58d26ff87b9b3cc16fe89a03e06a03329c2eb67734" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.546854 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-8zgk9" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.557864 4767 generic.go:334] "Generic (PLEG): container finished" podID="33e5dd4b-468d-4a50-9429-1faba885d020" containerID="56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4" exitCode=0 Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.558082 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/heat-engine-59d5689489-z5swl" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.558444 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-59d5689489-z5swl" event={"ID":"33e5dd4b-468d-4a50-9429-1faba885d020","Type":"ContainerDied","Data":"56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4"} Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.558523 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-engine-59d5689489-z5swl" event={"ID":"33e5dd4b-468d-4a50-9429-1faba885d020","Type":"ContainerDied","Data":"25b9748b6f8dc26309146e40cd1ee4cc1b3eb8c7dfa36724dea162c8baf0d34f"} Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.558555 4767 scope.go:117] "RemoveContainer" containerID="56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.563119 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.577799 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-pfccx" event={"ID":"e93b8328-4f9c-47da-8af8-c86f5acf443b","Type":"ContainerDied","Data":"c9e8d6cb1353b22510d1d17c3512f3bc875b3d570be40022aa8144b72df3a6d4"} Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.578583 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c9e8d6cb1353b22510d1d17c3512f3bc875b3d570be40022aa8144b72df3a6d4" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.578419 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-pfccx" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.593648 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-config-data\") pod \"33e5dd4b-468d-4a50-9429-1faba885d020\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.593886 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-combined-ca-bundle\") pod \"33e5dd4b-468d-4a50-9429-1faba885d020\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.593948 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jf2mf\" (UniqueName: \"kubernetes.io/projected/33e5dd4b-468d-4a50-9429-1faba885d020-kube-api-access-jf2mf\") pod \"33e5dd4b-468d-4a50-9429-1faba885d020\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.594143 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-config-data-custom\") pod \"33e5dd4b-468d-4a50-9429-1faba885d020\" (UID: \"33e5dd4b-468d-4a50-9429-1faba885d020\") " Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.594842 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gkvj6\" (UniqueName: \"kubernetes.io/projected/b1cdbe43-1ad1-46dd-9349-b7322d4068a0-kube-api-access-gkvj6\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.594864 4767 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d7bd\" (UniqueName: \"kubernetes.io/projected/f4fecf89-74fc-47bd-83ff-876cc16e8dc8-kube-api-access-4d7bd\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.615536 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33e5dd4b-468d-4a50-9429-1faba885d020-kube-api-access-jf2mf" (OuterVolumeSpecName: "kube-api-access-jf2mf") pod "33e5dd4b-468d-4a50-9429-1faba885d020" (UID: "33e5dd4b-468d-4a50-9429-1faba885d020"). InnerVolumeSpecName "kube-api-access-jf2mf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.626820 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "33e5dd4b-468d-4a50-9429-1faba885d020" (UID: "33e5dd4b-468d-4a50-9429-1faba885d020"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.640287 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-5p9bh" event={"ID":"b1cdbe43-1ad1-46dd-9349-b7322d4068a0","Type":"ContainerDied","Data":"632c85cbc9390b5df54b24dcecefaf5a2fe3b49b8a48bf0b959ca40ca7a53a9e"} Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.640353 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="632c85cbc9390b5df54b24dcecefaf5a2fe3b49b8a48bf0b959ca40ca7a53a9e" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.640439 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-5p9bh" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.684076 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17b6b71a-81a9-4343-b582-0c75aeb65bb1","Type":"ContainerStarted","Data":"ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032"} Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.697779 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jf2mf\" (UniqueName: \"kubernetes.io/projected/33e5dd4b-468d-4a50-9429-1faba885d020-kube-api-access-jf2mf\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.697823 4767 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.717464 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33e5dd4b-468d-4a50-9429-1faba885d020" (UID: "33e5dd4b-468d-4a50-9429-1faba885d020"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.733573 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-config-data" (OuterVolumeSpecName: "config-data") pod "33e5dd4b-468d-4a50-9429-1faba885d020" (UID: "33e5dd4b-468d-4a50-9429-1faba885d020"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.784239 4767 scope.go:117] "RemoveContainer" containerID="56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4" Jan 28 18:52:08 crc kubenswrapper[4767]: E0128 18:52:08.784907 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4\": container with ID starting with 56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4 not found: ID does not exist" containerID="56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.784948 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4"} err="failed to get container status \"56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4\": rpc error: code = NotFound desc = could not find container \"56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4\": container with ID starting with 56f8000c0138a4fb7acd9c9baa20f434c30ee61b5a2ba489e32209d32564cae4 not found: ID does not exist" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.804869 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.805183 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33e5dd4b-468d-4a50-9429-1faba885d020-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.974306 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-engine-59d5689489-z5swl"] Jan 28 18:52:08 crc kubenswrapper[4767]: I0128 18:52:08.992660 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-engine-59d5689489-z5swl"] Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.596456 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.701591 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-f207-account-create-update-4rckz" Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.737583 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c-operator-scripts\") pod \"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c\" (UID: \"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c\") " Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.737649 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29sxm\" (UniqueName: \"kubernetes.io/projected/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c-kube-api-access-29sxm\") pod \"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c\" (UID: \"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c\") " Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.754648 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c" (UID: "c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.755405 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8ec04a5-bc34-4006-b761-97437b8e5687","Type":"ContainerStarted","Data":"2586812cd64358f8158cf28c263c0dc088ff56571a97a5c52ede861b18a0601c"} Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.766418 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-f207-account-create-update-4rckz" event={"ID":"c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c","Type":"ContainerDied","Data":"882aba13f2dc74102cf6584d8992951581ad8824e1f53d388ba198c7d3e4a67e"} Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.766477 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="882aba13f2dc74102cf6584d8992951581ad8824e1f53d388ba198c7d3e4a67e" Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.766556 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-f207-account-create-update-4rckz" Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.782577 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c-kube-api-access-29sxm" (OuterVolumeSpecName: "kube-api-access-29sxm") pod "c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c" (UID: "c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c"). InnerVolumeSpecName "kube-api-access-29sxm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.815720 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17b6b71a-81a9-4343-b582-0c75aeb65bb1","Type":"ContainerStarted","Data":"1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1"} Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.843140 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.843190 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29sxm\" (UniqueName: \"kubernetes.io/projected/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c-kube-api-access-29sxm\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.858892 4767 generic.go:334] "Generic (PLEG): container finished" podID="46edc543-4bb1-408d-babc-b542091bafa8" containerID="48d6b4934cfcf5fb0462346dd6a9cb82e588d8d558c9f955d23dc8d2654d1c16" exitCode=0 Jan 28 18:52:09 crc kubenswrapper[4767]: I0128 18:52:09.858948 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"46edc543-4bb1-408d-babc-b542091bafa8","Type":"ContainerDied","Data":"48d6b4934cfcf5fb0462346dd6a9cb82e588d8d558c9f955d23dc8d2654d1c16"} Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.370984 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-6d16-account-create-update-4r74q" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.388472 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2cb5-account-create-update-tccrf" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.396323 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.463961 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-config-data\") pod \"46edc543-4bb1-408d-babc-b542091bafa8\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.464027 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cm7sk\" (UniqueName: \"kubernetes.io/projected/a673b46b-2808-4772-963d-7f2ae90be2fe-kube-api-access-cm7sk\") pod \"a673b46b-2808-4772-963d-7f2ae90be2fe\" (UID: \"a673b46b-2808-4772-963d-7f2ae90be2fe\") " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.464103 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8ckq\" (UniqueName: \"kubernetes.io/projected/10bc51c5-1acb-4d5f-9fde-fe1b17388f51-kube-api-access-f8ckq\") pod \"10bc51c5-1acb-4d5f-9fde-fe1b17388f51\" (UID: \"10bc51c5-1acb-4d5f-9fde-fe1b17388f51\") " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.464306 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a673b46b-2808-4772-963d-7f2ae90be2fe-operator-scripts\") pod \"a673b46b-2808-4772-963d-7f2ae90be2fe\" (UID: \"a673b46b-2808-4772-963d-7f2ae90be2fe\") " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.464403 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/46edc543-4bb1-408d-babc-b542091bafa8-httpd-run\") pod \"46edc543-4bb1-408d-babc-b542091bafa8\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.464435 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2g947\" (UniqueName: \"kubernetes.io/projected/46edc543-4bb1-408d-babc-b542091bafa8-kube-api-access-2g947\") pod \"46edc543-4bb1-408d-babc-b542091bafa8\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.464472 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-scripts\") pod \"46edc543-4bb1-408d-babc-b542091bafa8\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.464504 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46edc543-4bb1-408d-babc-b542091bafa8-logs\") pod \"46edc543-4bb1-408d-babc-b542091bafa8\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.464538 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-combined-ca-bundle\") pod \"46edc543-4bb1-408d-babc-b542091bafa8\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.464566 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10bc51c5-1acb-4d5f-9fde-fe1b17388f51-operator-scripts\") pod 
\"10bc51c5-1acb-4d5f-9fde-fe1b17388f51\" (UID: \"10bc51c5-1acb-4d5f-9fde-fe1b17388f51\") " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.464603 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-internal-tls-certs\") pod \"46edc543-4bb1-408d-babc-b542091bafa8\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.464640 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"46edc543-4bb1-408d-babc-b542091bafa8\" (UID: \"46edc543-4bb1-408d-babc-b542091bafa8\") " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.465594 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a673b46b-2808-4772-963d-7f2ae90be2fe-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a673b46b-2808-4772-963d-7f2ae90be2fe" (UID: "a673b46b-2808-4772-963d-7f2ae90be2fe"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.465847 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46edc543-4bb1-408d-babc-b542091bafa8-logs" (OuterVolumeSpecName: "logs") pod "46edc543-4bb1-408d-babc-b542091bafa8" (UID: "46edc543-4bb1-408d-babc-b542091bafa8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.465875 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/46edc543-4bb1-408d-babc-b542091bafa8-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "46edc543-4bb1-408d-babc-b542091bafa8" (UID: "46edc543-4bb1-408d-babc-b542091bafa8"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.465990 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/10bc51c5-1acb-4d5f-9fde-fe1b17388f51-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "10bc51c5-1acb-4d5f-9fde-fe1b17388f51" (UID: "10bc51c5-1acb-4d5f-9fde-fe1b17388f51"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.492671 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10bc51c5-1acb-4d5f-9fde-fe1b17388f51-kube-api-access-f8ckq" (OuterVolumeSpecName: "kube-api-access-f8ckq") pod "10bc51c5-1acb-4d5f-9fde-fe1b17388f51" (UID: "10bc51c5-1acb-4d5f-9fde-fe1b17388f51"). InnerVolumeSpecName "kube-api-access-f8ckq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.492839 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a673b46b-2808-4772-963d-7f2ae90be2fe-kube-api-access-cm7sk" (OuterVolumeSpecName: "kube-api-access-cm7sk") pod "a673b46b-2808-4772-963d-7f2ae90be2fe" (UID: "a673b46b-2808-4772-963d-7f2ae90be2fe"). InnerVolumeSpecName "kube-api-access-cm7sk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.528370 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46edc543-4bb1-408d-babc-b542091bafa8-kube-api-access-2g947" (OuterVolumeSpecName: "kube-api-access-2g947") pod "46edc543-4bb1-408d-babc-b542091bafa8" (UID: "46edc543-4bb1-408d-babc-b542091bafa8"). InnerVolumeSpecName "kube-api-access-2g947". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.536367 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-scripts" (OuterVolumeSpecName: "scripts") pod "46edc543-4bb1-408d-babc-b542091bafa8" (UID: "46edc543-4bb1-408d-babc-b542091bafa8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.551486 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "glance") pod "46edc543-4bb1-408d-babc-b542091bafa8" (UID: "46edc543-4bb1-408d-babc-b542091bafa8"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.567393 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a673b46b-2808-4772-963d-7f2ae90be2fe-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.567442 4767 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/46edc543-4bb1-408d-babc-b542091bafa8-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.567456 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2g947\" (UniqueName: \"kubernetes.io/projected/46edc543-4bb1-408d-babc-b542091bafa8-kube-api-access-2g947\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.567473 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.567487 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/46edc543-4bb1-408d-babc-b542091bafa8-logs\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.567511 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/10bc51c5-1acb-4d5f-9fde-fe1b17388f51-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.567546 4767 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.567558 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cm7sk\" (UniqueName: \"kubernetes.io/projected/a673b46b-2808-4772-963d-7f2ae90be2fe-kube-api-access-cm7sk\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.567570 4767 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8ckq\" (UniqueName: \"kubernetes.io/projected/10bc51c5-1acb-4d5f-9fde-fe1b17388f51-kube-api-access-f8ckq\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.649802 4767 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.672424 4767 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.772489 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-config-data" (OuterVolumeSpecName: "config-data") pod "46edc543-4bb1-408d-babc-b542091bafa8" (UID: "46edc543-4bb1-408d-babc-b542091bafa8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.779273 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.795147 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "46edc543-4bb1-408d-babc-b542091bafa8" (UID: "46edc543-4bb1-408d-babc-b542091bafa8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.816730 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33e5dd4b-468d-4a50-9429-1faba885d020" path="/var/lib/kubelet/pods/33e5dd4b-468d-4a50-9429-1faba885d020/volumes" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.837455 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "46edc543-4bb1-408d-babc-b542091bafa8" (UID: "46edc543-4bb1-408d-babc-b542091bafa8"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.883719 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.883764 4767 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/46edc543-4bb1-408d-babc-b542091bafa8-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.885411 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-2cb5-account-create-update-tccrf" event={"ID":"a673b46b-2808-4772-963d-7f2ae90be2fe","Type":"ContainerDied","Data":"f2cfc3049c0f54fdf6fc4b3aecc809f5387bb2069a8f63b05b19e229c256ed47"} Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.885467 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f2cfc3049c0f54fdf6fc4b3aecc809f5387bb2069a8f63b05b19e229c256ed47" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.885571 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-2cb5-account-create-update-tccrf" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.903280 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-6d16-account-create-update-4r74q" event={"ID":"10bc51c5-1acb-4d5f-9fde-fe1b17388f51","Type":"ContainerDied","Data":"9cf369c13216f2da54c9d9d7d600ceb3f6878173cd14ee32a6ac68c31acaead8"} Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.903347 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9cf369c13216f2da54c9d9d7d600ceb3f6878173cd14ee32a6ac68c31acaead8" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.903474 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-6d16-account-create-update-4r74q" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.909869 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"46edc543-4bb1-408d-babc-b542091bafa8","Type":"ContainerDied","Data":"fbcff4811c3f56de6183377d2b87e1b844845a4861e5169719ea4ba9bb1359fb"} Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.909938 4767 scope.go:117] "RemoveContainer" containerID="48d6b4934cfcf5fb0462346dd6a9cb82e588d8d558c9f955d23dc8d2654d1c16" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.910116 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.974597 4767 scope.go:117] "RemoveContainer" containerID="7a4e13758cf6c450608437b107663b30e9e6bf86e3f86eaf412130a7117966e2" Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.979019 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:52:10 crc kubenswrapper[4767]: I0128 18:52:10.997756 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.020255 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:52:11 crc kubenswrapper[4767]: E0128 18:52:11.020852 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b1cdbe43-1ad1-46dd-9349-b7322d4068a0" containerName="mariadb-database-create" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.020878 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b1cdbe43-1ad1-46dd-9349-b7322d4068a0" containerName="mariadb-database-create" Jan 28 18:52:11 crc kubenswrapper[4767]: E0128 18:52:11.020898 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4fecf89-74fc-47bd-83ff-876cc16e8dc8" containerName="mariadb-database-create" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.020906 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4fecf89-74fc-47bd-83ff-876cc16e8dc8" containerName="mariadb-database-create" Jan 28 18:52:11 crc kubenswrapper[4767]: E0128 18:52:11.020918 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46edc543-4bb1-408d-babc-b542091bafa8" containerName="glance-httpd" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.020925 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="46edc543-4bb1-408d-babc-b542091bafa8" containerName="glance-httpd" Jan 28 18:52:11 crc kubenswrapper[4767]: E0128 18:52:11.020941 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33e5dd4b-468d-4a50-9429-1faba885d020" containerName="heat-engine" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.020949 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="33e5dd4b-468d-4a50-9429-1faba885d020" containerName="heat-engine" Jan 28 18:52:11 crc kubenswrapper[4767]: E0128 18:52:11.020964 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10bc51c5-1acb-4d5f-9fde-fe1b17388f51" containerName="mariadb-account-create-update" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.020971 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="10bc51c5-1acb-4d5f-9fde-fe1b17388f51" containerName="mariadb-account-create-update" Jan 28 18:52:11 crc kubenswrapper[4767]: E0128 18:52:11.020980 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c" containerName="mariadb-account-create-update" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.020986 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c" containerName="mariadb-account-create-update" Jan 28 18:52:11 crc kubenswrapper[4767]: E0128 18:52:11.021004 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="46edc543-4bb1-408d-babc-b542091bafa8" containerName="glance-log" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.021014 4767 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="46edc543-4bb1-408d-babc-b542091bafa8" containerName="glance-log" Jan 28 18:52:11 crc kubenswrapper[4767]: E0128 18:52:11.021026 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e93b8328-4f9c-47da-8af8-c86f5acf443b" containerName="mariadb-database-create" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.021032 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e93b8328-4f9c-47da-8af8-c86f5acf443b" containerName="mariadb-database-create" Jan 28 18:52:11 crc kubenswrapper[4767]: E0128 18:52:11.021048 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a673b46b-2808-4772-963d-7f2ae90be2fe" containerName="mariadb-account-create-update" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.021055 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a673b46b-2808-4772-963d-7f2ae90be2fe" containerName="mariadb-account-create-update" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.021306 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="33e5dd4b-468d-4a50-9429-1faba885d020" containerName="heat-engine" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.021324 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c" containerName="mariadb-account-create-update" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.021335 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="46edc543-4bb1-408d-babc-b542091bafa8" containerName="glance-log" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.021344 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4fecf89-74fc-47bd-83ff-876cc16e8dc8" containerName="mariadb-database-create" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.021358 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e93b8328-4f9c-47da-8af8-c86f5acf443b" containerName="mariadb-database-create" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.021367 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b1cdbe43-1ad1-46dd-9349-b7322d4068a0" containerName="mariadb-database-create" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.021381 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="10bc51c5-1acb-4d5f-9fde-fe1b17388f51" containerName="mariadb-account-create-update" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.021392 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="46edc543-4bb1-408d-babc-b542091bafa8" containerName="glance-httpd" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.021407 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a673b46b-2808-4772-963d-7f2ae90be2fe" containerName="mariadb-account-create-update" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.022741 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.027479 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.027800 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.057872 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.195755 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.195809 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v5vht\" (UniqueName: \"kubernetes.io/projected/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-kube-api-access-v5vht\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.195842 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.195955 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.195972 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-logs\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.195988 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.196022 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.196321 4767 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.299105 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.299178 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-logs\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.299212 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.299303 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.299343 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.299465 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.299834 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.300145 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-logs\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.300494 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.307314 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.307739 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.308619 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.359915 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v5vht\" (UniqueName: \"kubernetes.io/projected/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-kube-api-access-v5vht\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.359993 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.368556 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.401600 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v5vht\" (UniqueName: \"kubernetes.io/projected/fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6-kube-api-access-v5vht\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.411991 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"glance-default-internal-api-0\" (UID: \"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6\") " pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.506470 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.959516 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8ec04a5-bc34-4006-b761-97437b8e5687","Type":"ContainerStarted","Data":"9325c4eab34c58788f4b819bf7f78afb4b44d8426db53a62dae021667f6a85e2"} Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.965691 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17b6b71a-81a9-4343-b582-0c75aeb65bb1","Type":"ContainerStarted","Data":"3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9"} Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.965938 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="ceilometer-central-agent" containerID="cri-o://907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97" gracePeriod=30 Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.966051 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.966647 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="proxy-httpd" containerID="cri-o://3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9" gracePeriod=30 Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.966772 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="ceilometer-notification-agent" containerID="cri-o://ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032" gracePeriod=30 Jan 28 18:52:11 crc kubenswrapper[4767]: I0128 18:52:11.966774 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="sg-core" containerID="cri-o://1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1" gracePeriod=30 Jan 28 18:52:12 crc kubenswrapper[4767]: I0128 18:52:12.010844 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.860468184 podStartE2EDuration="9.010816295s" podCreationTimestamp="2026-01-28 18:52:03 +0000 UTC" firstStartedPulling="2026-01-28 18:52:05.026127509 +0000 UTC m=+1330.990310383" lastFinishedPulling="2026-01-28 18:52:10.17647562 +0000 UTC m=+1336.140658494" observedRunningTime="2026-01-28 18:52:12.005747346 +0000 UTC m=+1337.969930220" watchObservedRunningTime="2026-01-28 18:52:12.010816295 +0000 UTC m=+1337.974999169" Jan 28 18:52:12 crc kubenswrapper[4767]: I0128 18:52:12.267300 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 28 18:52:12 crc kubenswrapper[4767]: I0128 18:52:12.829013 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46edc543-4bb1-408d-babc-b542091bafa8" path="/var/lib/kubelet/pods/46edc543-4bb1-408d-babc-b542091bafa8/volumes" Jan 28 18:52:12 crc kubenswrapper[4767]: I0128 18:52:12.985785 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6","Type":"ContainerStarted","Data":"357ebf7ad22a066f9904cdf324cc5c5304c219c78c66e5f7cfc4fb28fb05a487"} Jan 28 18:52:12 crc kubenswrapper[4767]: I0128 18:52:12.987384 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6","Type":"ContainerStarted","Data":"d5118f3f597981e9e76c8e065d53fc04df29bfd7487664897ee0bea53d638a4e"} Jan 28 18:52:12 crc kubenswrapper[4767]: I0128 18:52:12.994494 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"a8ec04a5-bc34-4006-b761-97437b8e5687","Type":"ContainerStarted","Data":"0408c5b6cd2e577d0d26fa0b61d4b28889fbf46f8dadfd47a17e087f08f0dd02"} Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.012912 4767 generic.go:334] "Generic (PLEG): container finished" podID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerID="3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9" exitCode=0 Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.012967 4767 generic.go:334] "Generic (PLEG): container finished" podID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerID="1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1" exitCode=2 Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.012977 4767 generic.go:334] "Generic (PLEG): container finished" podID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerID="ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032" exitCode=0 Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.013008 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17b6b71a-81a9-4343-b582-0c75aeb65bb1","Type":"ContainerDied","Data":"3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9"} Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.013069 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17b6b71a-81a9-4343-b582-0c75aeb65bb1","Type":"ContainerDied","Data":"1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1"} Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.013084 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17b6b71a-81a9-4343-b582-0c75aeb65bb1","Type":"ContainerDied","Data":"ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032"} Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.048558 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=7.048529852 podStartE2EDuration="7.048529852s" podCreationTimestamp="2026-01-28 18:52:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:52:13.036384802 +0000 UTC m=+1339.000567676" watchObservedRunningTime="2026-01-28 18:52:13.048529852 +0000 UTC m=+1339.012712726" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.319701 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2lrw9"] Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.321490 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.328563 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-zmthd" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.328906 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.329019 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.336812 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2lrw9"] Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.425691 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-config-data\") pod \"nova-cell0-conductor-db-sync-2lrw9\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") " pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.425800 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-2lrw9\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") " pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.425913 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-scripts\") pod \"nova-cell0-conductor-db-sync-2lrw9\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") " pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.425971 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mmf8d\" (UniqueName: \"kubernetes.io/projected/cb607654-d519-4a0c-bd79-337c1340c237-kube-api-access-mmf8d\") pod \"nova-cell0-conductor-db-sync-2lrw9\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") " pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.528159 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-scripts\") pod \"nova-cell0-conductor-db-sync-2lrw9\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") " pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.528355 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mmf8d\" (UniqueName: \"kubernetes.io/projected/cb607654-d519-4a0c-bd79-337c1340c237-kube-api-access-mmf8d\") pod \"nova-cell0-conductor-db-sync-2lrw9\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") " pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.528414 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-config-data\") pod \"nova-cell0-conductor-db-sync-2lrw9\" (UID: 
\"cb607654-d519-4a0c-bd79-337c1340c237\") " pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.528462 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-2lrw9\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") " pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.536008 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-2lrw9\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") " pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.543315 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-config-data\") pod \"nova-cell0-conductor-db-sync-2lrw9\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") " pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.547864 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mmf8d\" (UniqueName: \"kubernetes.io/projected/cb607654-d519-4a0c-bd79-337c1340c237-kube-api-access-mmf8d\") pod \"nova-cell0-conductor-db-sync-2lrw9\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") " pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.556607 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-scripts\") pod \"nova-cell0-conductor-db-sync-2lrw9\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") " pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:13 crc kubenswrapper[4767]: I0128 18:52:13.662250 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-2lrw9" Jan 28 18:52:14 crc kubenswrapper[4767]: I0128 18:52:14.036417 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6","Type":"ContainerStarted","Data":"01f55e5d2c22f0f110b26c56a2a7d6e8e8cd4356f8979533c18b9c9cd356f4e3"} Jan 28 18:52:14 crc kubenswrapper[4767]: I0128 18:52:14.073511 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.07347708 podStartE2EDuration="4.07347708s" podCreationTimestamp="2026-01-28 18:52:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:52:14.065666966 +0000 UTC m=+1340.029849860" watchObservedRunningTime="2026-01-28 18:52:14.07347708 +0000 UTC m=+1340.037659954" Jan 28 18:52:14 crc kubenswrapper[4767]: I0128 18:52:14.309965 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2lrw9"] Jan 28 18:52:15 crc kubenswrapper[4767]: I0128 18:52:15.073785 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-2lrw9" event={"ID":"cb607654-d519-4a0c-bd79-337c1340c237","Type":"ContainerStarted","Data":"68d26aba0511f43c29713a922bd11520816327bd41318ff607d319f3fee96c89"} Jan 28 18:52:16 crc kubenswrapper[4767]: E0128 18:52:16.732857 4767 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod17b6b71a_81a9_4343_b582_0c75aeb65bb1.slice/crio-conmon-907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97.scope\": RecentStats: unable to find data in memory cache]" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.013762 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.104539 4767 generic.go:334] "Generic (PLEG): container finished" podID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerID="907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97" exitCode=0 Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.104599 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17b6b71a-81a9-4343-b582-0c75aeb65bb1","Type":"ContainerDied","Data":"907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97"} Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.104633 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"17b6b71a-81a9-4343-b582-0c75aeb65bb1","Type":"ContainerDied","Data":"e9ba3c97638b8569e120a474eb876b69ae82c2efd1604dc5b826a9d848810148"} Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.104656 4767 scope.go:117] "RemoveContainer" containerID="3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.104817 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.122073 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vz9rb\" (UniqueName: \"kubernetes.io/projected/17b6b71a-81a9-4343-b582-0c75aeb65bb1-kube-api-access-vz9rb\") pod \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.122149 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17b6b71a-81a9-4343-b582-0c75aeb65bb1-log-httpd\") pod \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.122278 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-config-data\") pod \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.122308 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-sg-core-conf-yaml\") pod \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.122333 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-combined-ca-bundle\") pod \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.122948 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17b6b71a-81a9-4343-b582-0c75aeb65bb1-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "17b6b71a-81a9-4343-b582-0c75aeb65bb1" (UID: "17b6b71a-81a9-4343-b582-0c75aeb65bb1"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.123406 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-scripts\") pod \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.123607 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17b6b71a-81a9-4343-b582-0c75aeb65bb1-run-httpd\") pod \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\" (UID: \"17b6b71a-81a9-4343-b582-0c75aeb65bb1\") " Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.124524 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17b6b71a-81a9-4343-b582-0c75aeb65bb1-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.124764 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17b6b71a-81a9-4343-b582-0c75aeb65bb1-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "17b6b71a-81a9-4343-b582-0c75aeb65bb1" (UID: "17b6b71a-81a9-4343-b582-0c75aeb65bb1"). 
InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.130271 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-scripts" (OuterVolumeSpecName: "scripts") pod "17b6b71a-81a9-4343-b582-0c75aeb65bb1" (UID: "17b6b71a-81a9-4343-b582-0c75aeb65bb1"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.134855 4767 scope.go:117] "RemoveContainer" containerID="1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.160156 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17b6b71a-81a9-4343-b582-0c75aeb65bb1-kube-api-access-vz9rb" (OuterVolumeSpecName: "kube-api-access-vz9rb") pod "17b6b71a-81a9-4343-b582-0c75aeb65bb1" (UID: "17b6b71a-81a9-4343-b582-0c75aeb65bb1"). InnerVolumeSpecName "kube-api-access-vz9rb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.167752 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "17b6b71a-81a9-4343-b582-0c75aeb65bb1" (UID: "17b6b71a-81a9-4343-b582-0c75aeb65bb1"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.214272 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "17b6b71a-81a9-4343-b582-0c75aeb65bb1" (UID: "17b6b71a-81a9-4343-b582-0c75aeb65bb1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.227030 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/17b6b71a-81a9-4343-b582-0c75aeb65bb1-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.227242 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vz9rb\" (UniqueName: \"kubernetes.io/projected/17b6b71a-81a9-4343-b582-0c75aeb65bb1-kube-api-access-vz9rb\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.227309 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.227373 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.227431 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.255908 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-config-data" (OuterVolumeSpecName: "config-data") pod "17b6b71a-81a9-4343-b582-0c75aeb65bb1" (UID: "17b6b71a-81a9-4343-b582-0c75aeb65bb1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.263345 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.284626 4767 scope.go:117] "RemoveContainer" containerID="ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.330243 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17b6b71a-81a9-4343-b582-0c75aeb65bb1-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.334775 4767 scope.go:117] "RemoveContainer" containerID="907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.342601 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.344297 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.390783 4767 scope.go:117] "RemoveContainer" containerID="3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9" Jan 28 18:52:17 crc kubenswrapper[4767]: E0128 18:52:17.395993 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9\": container with ID starting with 3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9 not found: ID does not exist" 
containerID="3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.396104 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9"} err="failed to get container status \"3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9\": rpc error: code = NotFound desc = could not find container \"3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9\": container with ID starting with 3e97758e6553aa4ebd73b291dbe5325f480be85a145d74ef81d39b5fd039a1d9 not found: ID does not exist" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.396207 4767 scope.go:117] "RemoveContainer" containerID="1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1" Jan 28 18:52:17 crc kubenswrapper[4767]: E0128 18:52:17.396988 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1\": container with ID starting with 1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1 not found: ID does not exist" containerID="1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.397012 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1"} err="failed to get container status \"1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1\": rpc error: code = NotFound desc = could not find container \"1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1\": container with ID starting with 1aa085209534392ce2de34368a468c40452cdb9c9a4a0115773435c32c9d1ca1 not found: ID does not exist" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.397032 4767 scope.go:117] "RemoveContainer" containerID="ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032" Jan 28 18:52:17 crc kubenswrapper[4767]: E0128 18:52:17.399286 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032\": container with ID starting with ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032 not found: ID does not exist" containerID="ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.399317 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032"} err="failed to get container status \"ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032\": rpc error: code = NotFound desc = could not find container \"ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032\": container with ID starting with ea09d097ca2438873df4ffc41261ab50812a1ec5f7481c65efc965e5688dd032 not found: ID does not exist" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.399334 4767 scope.go:117] "RemoveContainer" containerID="907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.399838 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 
18:52:17.403996 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 28 18:52:17 crc kubenswrapper[4767]: E0128 18:52:17.404468 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97\": container with ID starting with 907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97 not found: ID does not exist" containerID="907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.404517 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97"} err="failed to get container status \"907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97\": rpc error: code = NotFound desc = could not find container \"907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97\": container with ID starting with 907f572c0879c78349cc767400d22a5731e6cc7cbe531b087592c720e83ffa97 not found: ID does not exist" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.510752 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.540366 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.575402 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:17 crc kubenswrapper[4767]: E0128 18:52:17.576457 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="ceilometer-notification-agent" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.576490 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="ceilometer-notification-agent" Jan 28 18:52:17 crc kubenswrapper[4767]: E0128 18:52:17.576505 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="sg-core" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.576513 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="sg-core" Jan 28 18:52:17 crc kubenswrapper[4767]: E0128 18:52:17.576529 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="ceilometer-central-agent" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.576536 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="ceilometer-central-agent" Jan 28 18:52:17 crc kubenswrapper[4767]: E0128 18:52:17.576584 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="proxy-httpd" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.576592 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="proxy-httpd" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.577032 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="sg-core" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.577052 4767 memory_manager.go:354] "RemoveStaleState 
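
The E-level "ContainerStatus from runtime service failed ... NotFound" entries above are a benign race, not a real failure: the kubelet asks CRI-O for the status of containers it is in the middle of removing, and the runtime answers with gRPC NotFound because they are already gone. A sketch of the usual way a Go CRI client classifies that reply, assuming the standard grpc-go status and codes packages:

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // alreadyGone reports whether a CRI call failed only because the
    // container no longer exists, the benign case seen in the log above.
    func alreadyGone(err error) bool {
        return status.Code(err) == codes.NotFound
    }

    func main() {
        // Stand-in for the runtime's reply; "abc123" is a hypothetical
        // container ID, not one from this log. The kubelet receives this
        // same shape of error over the CRI gRPC connection.
        err := status.Error(codes.NotFound, `could not find container "abc123": ID does not exist`)
        if alreadyGone(err) {
            fmt.Println("container already removed; treat as success")
        } else if err != nil {
            fmt.Println("unexpected runtime error:", err)
        }
    }

That is why each E-level line is immediately followed by an I-level "DeleteContainer returned error" line and processing simply continues.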
removing state" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="ceilometer-notification-agent" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.577063 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="ceilometer-central-agent" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.577072 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" containerName="proxy-httpd" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.579500 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.586096 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.586444 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.601783 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.645958 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-config-data\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.646011 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/db2abb36-14d9-4986-ac53-33b66f1cc7df-log-httpd\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.646086 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jr2q\" (UniqueName: \"kubernetes.io/projected/db2abb36-14d9-4986-ac53-33b66f1cc7df-kube-api-access-6jr2q\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.646108 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.646144 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/db2abb36-14d9-4986-ac53-33b66f1cc7df-run-httpd\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.646172 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-scripts\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.646237 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.747990 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-config-data\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.748044 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/db2abb36-14d9-4986-ac53-33b66f1cc7df-log-httpd\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.748126 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jr2q\" (UniqueName: \"kubernetes.io/projected/db2abb36-14d9-4986-ac53-33b66f1cc7df-kube-api-access-6jr2q\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.748150 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.748183 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/db2abb36-14d9-4986-ac53-33b66f1cc7df-run-httpd\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.748230 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-scripts\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.748278 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.750445 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/db2abb36-14d9-4986-ac53-33b66f1cc7df-log-httpd\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.750465 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/db2abb36-14d9-4986-ac53-33b66f1cc7df-run-httpd\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.756503 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-config-data\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.763005 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-scripts\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.764701 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.773134 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.775647 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jr2q\" (UniqueName: \"kubernetes.io/projected/db2abb36-14d9-4986-ac53-33b66f1cc7df-kube-api-access-6jr2q\") pod \"ceilometer-0\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " pod="openstack/ceilometer-0" Jan 28 18:52:17 crc kubenswrapper[4767]: I0128 18:52:17.923946 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:52:18 crc kubenswrapper[4767]: I0128 18:52:18.125375 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 28 18:52:18 crc kubenswrapper[4767]: I0128 18:52:18.125819 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 28 18:52:18 crc kubenswrapper[4767]: I0128 18:52:18.411297 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:18 crc kubenswrapper[4767]: W0128 18:52:18.425922 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb2abb36_14d9_4986_ac53_33b66f1cc7df.slice/crio-6e9c94e04b159a746a8585a4ff1a50690e3b72b687d19433010158c622f36488 WatchSource:0}: Error finding container 6e9c94e04b159a746a8585a4ff1a50690e3b72b687d19433010158c622f36488: Status 404 returned error can't find the container with id 6e9c94e04b159a746a8585a4ff1a50690e3b72b687d19433010158c622f36488 Jan 28 18:52:18 crc kubenswrapper[4767]: I0128 18:52:18.811850 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17b6b71a-81a9-4343-b582-0c75aeb65bb1" path="/var/lib/kubelet/pods/17b6b71a-81a9-4343-b582-0c75aeb65bb1/volumes" Jan 28 18:52:19 crc kubenswrapper[4767]: I0128 18:52:19.143227 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"db2abb36-14d9-4986-ac53-33b66f1cc7df","Type":"ContainerStarted","Data":"6e9c94e04b159a746a8585a4ff1a50690e3b72b687d19433010158c622f36488"} Jan 28 18:52:20 crc kubenswrapper[4767]: I0128 18:52:20.096896 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-678958868f-sc9dm" Jan 28 18:52:20 crc 
Jan 28 18:52:20 crc kubenswrapper[4767]: I0128 18:52:20.200346 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6b85bcdd44-j7nc5"]
Jan 28 18:52:20 crc kubenswrapper[4767]: I0128 18:52:20.200654 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6b85bcdd44-j7nc5" podUID="bb57e040-6449-4416-87a1-776751f75752" containerName="neutron-api" containerID="cri-o://dba4659dc85b26e12ae70c25ded3b5737cc1016d59e2c35f465db8f18ca60614" gracePeriod=30
Jan 28 18:52:20 crc kubenswrapper[4767]: I0128 18:52:20.201234 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6b85bcdd44-j7nc5" podUID="bb57e040-6449-4416-87a1-776751f75752" containerName="neutron-httpd" containerID="cri-o://30f6cba37c598c6e0290d4657113db77f14c9572cdd524b80f3cae6218895496" gracePeriod=30
Jan 28 18:52:21 crc kubenswrapper[4767]: I0128 18:52:21.176768 4767 generic.go:334] "Generic (PLEG): container finished" podID="bb57e040-6449-4416-87a1-776751f75752" containerID="30f6cba37c598c6e0290d4657113db77f14c9572cdd524b80f3cae6218895496" exitCode=0
Jan 28 18:52:21 crc kubenswrapper[4767]: I0128 18:52:21.177273 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b85bcdd44-j7nc5" event={"ID":"bb57e040-6449-4416-87a1-776751f75752","Type":"ContainerDied","Data":"30f6cba37c598c6e0290d4657113db77f14c9572cdd524b80f3cae6218895496"}
Jan 28 18:52:21 crc kubenswrapper[4767]: I0128 18:52:21.226023 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 28 18:52:21 crc kubenswrapper[4767]: I0128 18:52:21.226230 4767 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 28 18:52:21 crc kubenswrapper[4767]: I0128 18:52:21.227277 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 28 18:52:21 crc kubenswrapper[4767]: I0128 18:52:21.508216 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 28 18:52:21 crc kubenswrapper[4767]: I0128 18:52:21.508343 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 28 18:52:21 crc kubenswrapper[4767]: I0128 18:52:21.570564 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Jan 28 18:52:21 crc kubenswrapper[4767]: I0128 18:52:21.572351 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Jan 28 18:52:22 crc kubenswrapper[4767]: I0128 18:52:22.020636 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:52:22 crc kubenswrapper[4767]: I0128 18:52:22.188627 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 28 18:52:22 crc kubenswrapper[4767]: I0128 18:52:22.188705 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 28 18:52:24 crc kubenswrapper[4767]: I0128 18:52:24.786911 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 28 18:52:24 crc kubenswrapper[4767]: I0128 18:52:24.787503 4767 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 28 18:52:24 crc kubenswrapper[4767]: I0128 18:52:24.792871 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.293165 4767 generic.go:334] "Generic (PLEG): container finished" podID="bb57e040-6449-4416-87a1-776751f75752" containerID="dba4659dc85b26e12ae70c25ded3b5737cc1016d59e2c35f465db8f18ca60614" exitCode=0
Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.293645 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b85bcdd44-j7nc5" event={"ID":"bb57e040-6449-4416-87a1-776751f75752","Type":"ContainerDied","Data":"dba4659dc85b26e12ae70c25ded3b5737cc1016d59e2c35f465db8f18ca60614"}
Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.425298 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6b85bcdd44-j7nc5"
Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.605603 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-combined-ca-bundle\") pod \"bb57e040-6449-4416-87a1-776751f75752\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") "
Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.605745 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8bdn\" (UniqueName: \"kubernetes.io/projected/bb57e040-6449-4416-87a1-776751f75752-kube-api-access-h8bdn\") pod \"bb57e040-6449-4416-87a1-776751f75752\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") "
Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.605810 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-ovndb-tls-certs\") pod \"bb57e040-6449-4416-87a1-776751f75752\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") "
Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.605925 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-config\") pod \"bb57e040-6449-4416-87a1-776751f75752\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") "
Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.606013 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-httpd-config\") pod \"bb57e040-6449-4416-87a1-776751f75752\" (UID: \"bb57e040-6449-4416-87a1-776751f75752\") "
Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.617269 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb57e040-6449-4416-87a1-776751f75752-kube-api-access-h8bdn" (OuterVolumeSpecName: "kube-api-access-h8bdn") pod "bb57e040-6449-4416-87a1-776751f75752" (UID: "bb57e040-6449-4416-87a1-776751f75752"). InnerVolumeSpecName "kube-api-access-h8bdn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.622987 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "bb57e040-6449-4416-87a1-776751f75752" (UID: "bb57e040-6449-4416-87a1-776751f75752"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
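
The "Killing container with a grace period ... gracePeriod=30" entries describe ordinary graceful termination: the runtime delivers SIGTERM and escalates to SIGKILL only if the container is still alive when the grace period expires. In this log both neutron containers make the window comfortably: neutron-httpd exits about a second after the kill request, neutron-api about six seconds after (18:52:20 to 18:52:26), and both report exitCode=0. A minimal sketch of the same semantics for a local process; this is an illustration only, since the kubelet drives this through the CRI rather than os/exec, and it assumes a Unix platform for syscall.SIGTERM:

    package main

    import (
        "fmt"
        "os/exec"
        "syscall"
        "time"
    )

    // stopWithGrace sends SIGTERM, waits up to the grace period for the
    // process to exit on its own, and only then falls back to SIGKILL.
    func stopWithGrace(cmd *exec.Cmd, grace time.Duration) error {
        done := make(chan error, 1)
        go func() { done <- cmd.Wait() }()
        _ = cmd.Process.Signal(syscall.SIGTERM)
        select {
        case err := <-done:
            return err // exited within the grace period
        case <-time.After(grace):
            _ = cmd.Process.Kill() // grace period elapsed: SIGKILL
            return <-done
        }
    }

    func main() {
        cmd := exec.Command("sleep", "300")
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        fmt.Println("stopped:", stopWithGrace(cmd, 30*time.Second))
    }

Later in this section sg-core reports exitCode=2 under the same treatment, which is consistent with a process that does not trap SIGTERM cleanly, while its sibling containers exit 0.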
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.695545 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-config" (OuterVolumeSpecName: "config") pod "bb57e040-6449-4416-87a1-776751f75752" (UID: "bb57e040-6449-4416-87a1-776751f75752"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.709554 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bb57e040-6449-4416-87a1-776751f75752" (UID: "bb57e040-6449-4416-87a1-776751f75752"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.711281 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.711306 4767 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.711316 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.711328 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8bdn\" (UniqueName: \"kubernetes.io/projected/bb57e040-6449-4416-87a1-776751f75752-kube-api-access-h8bdn\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.761472 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "bb57e040-6449-4416-87a1-776751f75752" (UID: "bb57e040-6449-4416-87a1-776751f75752"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:26 crc kubenswrapper[4767]: I0128 18:52:26.813312 4767 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bb57e040-6449-4416-87a1-776751f75752-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:27 crc kubenswrapper[4767]: I0128 18:52:27.307501 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"db2abb36-14d9-4986-ac53-33b66f1cc7df","Type":"ContainerStarted","Data":"957325b13755878a391f879b403d2c4ef4819c99d1f10c3bc7a739be9e61876f"} Jan 28 18:52:27 crc kubenswrapper[4767]: I0128 18:52:27.308035 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"db2abb36-14d9-4986-ac53-33b66f1cc7df","Type":"ContainerStarted","Data":"281802edc7d67f57b6e7d97be4cd95200a92cd5bace9aa824a127dd49f65a901"} Jan 28 18:52:27 crc kubenswrapper[4767]: I0128 18:52:27.311673 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-2lrw9" event={"ID":"cb607654-d519-4a0c-bd79-337c1340c237","Type":"ContainerStarted","Data":"c3a86a892dbd1b355c3a23b4a5db80f85ad44aec4988db387b065de363b1033e"} Jan 28 18:52:27 crc kubenswrapper[4767]: I0128 18:52:27.314391 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6b85bcdd44-j7nc5" event={"ID":"bb57e040-6449-4416-87a1-776751f75752","Type":"ContainerDied","Data":"a5f0a72c454ef5abb176011aafa794d0e59a46a22a7bf0e4f97538ff9b6bf5c5"} Jan 28 18:52:27 crc kubenswrapper[4767]: I0128 18:52:27.314445 4767 scope.go:117] "RemoveContainer" containerID="30f6cba37c598c6e0290d4657113db77f14c9572cdd524b80f3cae6218895496" Jan 28 18:52:27 crc kubenswrapper[4767]: I0128 18:52:27.314446 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-6b85bcdd44-j7nc5" Jan 28 18:52:27 crc kubenswrapper[4767]: I0128 18:52:27.360008 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-2lrw9" podStartSLOduration=2.677975087 podStartE2EDuration="14.359961707s" podCreationTimestamp="2026-01-28 18:52:13 +0000 UTC" firstStartedPulling="2026-01-28 18:52:14.325775399 +0000 UTC m=+1340.289958283" lastFinishedPulling="2026-01-28 18:52:26.007762029 +0000 UTC m=+1351.971944903" observedRunningTime="2026-01-28 18:52:27.330005372 +0000 UTC m=+1353.294188246" watchObservedRunningTime="2026-01-28 18:52:27.359961707 +0000 UTC m=+1353.324144581" Jan 28 18:52:27 crc kubenswrapper[4767]: I0128 18:52:27.365226 4767 scope.go:117] "RemoveContainer" containerID="dba4659dc85b26e12ae70c25ded3b5737cc1016d59e2c35f465db8f18ca60614" Jan 28 18:52:27 crc kubenswrapper[4767]: I0128 18:52:27.386298 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6b85bcdd44-j7nc5"] Jan 28 18:52:27 crc kubenswrapper[4767]: I0128 18:52:27.404623 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-6b85bcdd44-j7nc5"] Jan 28 18:52:28 crc kubenswrapper[4767]: I0128 18:52:28.336499 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"db2abb36-14d9-4986-ac53-33b66f1cc7df","Type":"ContainerStarted","Data":"bb0372dba2c29faaa0f6987682a8e5a1a5782a7a90b4452ebdf1bcd2bd1fdb27"} Jan 28 18:52:28 crc kubenswrapper[4767]: I0128 18:52:28.810182 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb57e040-6449-4416-87a1-776751f75752" path="/var/lib/kubelet/pods/bb57e040-6449-4416-87a1-776751f75752/volumes" Jan 28 18:52:30 crc kubenswrapper[4767]: I0128 18:52:30.365294 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"db2abb36-14d9-4986-ac53-33b66f1cc7df","Type":"ContainerStarted","Data":"44c4218df0574ae7390dda64aee8dc546eb72069757f4b9a981744349765d759"} Jan 28 18:52:30 crc kubenswrapper[4767]: I0128 18:52:30.365689 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="ceilometer-central-agent" containerID="cri-o://281802edc7d67f57b6e7d97be4cd95200a92cd5bace9aa824a127dd49f65a901" gracePeriod=30 Jan 28 18:52:30 crc kubenswrapper[4767]: I0128 18:52:30.365936 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 18:52:30 crc kubenswrapper[4767]: I0128 18:52:30.365988 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="proxy-httpd" containerID="cri-o://44c4218df0574ae7390dda64aee8dc546eb72069757f4b9a981744349765d759" gracePeriod=30 Jan 28 18:52:30 crc kubenswrapper[4767]: I0128 18:52:30.366120 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="sg-core" containerID="cri-o://bb0372dba2c29faaa0f6987682a8e5a1a5782a7a90b4452ebdf1bcd2bd1fdb27" gracePeriod=30 Jan 28 18:52:30 crc kubenswrapper[4767]: I0128 18:52:30.366173 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="ceilometer-notification-agent" 
containerID="cri-o://957325b13755878a391f879b403d2c4ef4819c99d1f10c3bc7a739be9e61876f" gracePeriod=30 Jan 28 18:52:30 crc kubenswrapper[4767]: I0128 18:52:30.394645 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.574594915 podStartE2EDuration="13.394621937s" podCreationTimestamp="2026-01-28 18:52:17 +0000 UTC" firstStartedPulling="2026-01-28 18:52:18.430501706 +0000 UTC m=+1344.394684580" lastFinishedPulling="2026-01-28 18:52:29.250528728 +0000 UTC m=+1355.214711602" observedRunningTime="2026-01-28 18:52:30.391586242 +0000 UTC m=+1356.355769116" watchObservedRunningTime="2026-01-28 18:52:30.394621937 +0000 UTC m=+1356.358804811" Jan 28 18:52:31 crc kubenswrapper[4767]: I0128 18:52:31.378916 4767 generic.go:334] "Generic (PLEG): container finished" podID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerID="44c4218df0574ae7390dda64aee8dc546eb72069757f4b9a981744349765d759" exitCode=0 Jan 28 18:52:31 crc kubenswrapper[4767]: I0128 18:52:31.378994 4767 generic.go:334] "Generic (PLEG): container finished" podID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerID="bb0372dba2c29faaa0f6987682a8e5a1a5782a7a90b4452ebdf1bcd2bd1fdb27" exitCode=2 Jan 28 18:52:31 crc kubenswrapper[4767]: I0128 18:52:31.378992 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"db2abb36-14d9-4986-ac53-33b66f1cc7df","Type":"ContainerDied","Data":"44c4218df0574ae7390dda64aee8dc546eb72069757f4b9a981744349765d759"} Jan 28 18:52:31 crc kubenswrapper[4767]: I0128 18:52:31.379065 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"db2abb36-14d9-4986-ac53-33b66f1cc7df","Type":"ContainerDied","Data":"bb0372dba2c29faaa0f6987682a8e5a1a5782a7a90b4452ebdf1bcd2bd1fdb27"} Jan 28 18:52:31 crc kubenswrapper[4767]: I0128 18:52:31.379088 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"db2abb36-14d9-4986-ac53-33b66f1cc7df","Type":"ContainerDied","Data":"957325b13755878a391f879b403d2c4ef4819c99d1f10c3bc7a739be9e61876f"} Jan 28 18:52:31 crc kubenswrapper[4767]: I0128 18:52:31.379011 4767 generic.go:334] "Generic (PLEG): container finished" podID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerID="957325b13755878a391f879b403d2c4ef4819c99d1f10c3bc7a739be9e61876f" exitCode=0 Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.467091 4767 generic.go:334] "Generic (PLEG): container finished" podID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerID="281802edc7d67f57b6e7d97be4cd95200a92cd5bace9aa824a127dd49f65a901" exitCode=0 Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.471064 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"db2abb36-14d9-4986-ac53-33b66f1cc7df","Type":"ContainerDied","Data":"281802edc7d67f57b6e7d97be4cd95200a92cd5bace9aa824a127dd49f65a901"} Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.728224 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.796014 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-sg-core-conf-yaml\") pod \"db2abb36-14d9-4986-ac53-33b66f1cc7df\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.796156 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-scripts\") pod \"db2abb36-14d9-4986-ac53-33b66f1cc7df\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.796234 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/db2abb36-14d9-4986-ac53-33b66f1cc7df-log-httpd\") pod \"db2abb36-14d9-4986-ac53-33b66f1cc7df\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.796420 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jr2q\" (UniqueName: \"kubernetes.io/projected/db2abb36-14d9-4986-ac53-33b66f1cc7df-kube-api-access-6jr2q\") pod \"db2abb36-14d9-4986-ac53-33b66f1cc7df\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.796511 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-combined-ca-bundle\") pod \"db2abb36-14d9-4986-ac53-33b66f1cc7df\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.796543 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-config-data\") pod \"db2abb36-14d9-4986-ac53-33b66f1cc7df\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.796578 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/db2abb36-14d9-4986-ac53-33b66f1cc7df-run-httpd\") pod \"db2abb36-14d9-4986-ac53-33b66f1cc7df\" (UID: \"db2abb36-14d9-4986-ac53-33b66f1cc7df\") " Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.797648 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db2abb36-14d9-4986-ac53-33b66f1cc7df-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "db2abb36-14d9-4986-ac53-33b66f1cc7df" (UID: "db2abb36-14d9-4986-ac53-33b66f1cc7df"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.797758 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db2abb36-14d9-4986-ac53-33b66f1cc7df-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "db2abb36-14d9-4986-ac53-33b66f1cc7df" (UID: "db2abb36-14d9-4986-ac53-33b66f1cc7df"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.822923 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-scripts" (OuterVolumeSpecName: "scripts") pod "db2abb36-14d9-4986-ac53-33b66f1cc7df" (UID: "db2abb36-14d9-4986-ac53-33b66f1cc7df"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.841838 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db2abb36-14d9-4986-ac53-33b66f1cc7df-kube-api-access-6jr2q" (OuterVolumeSpecName: "kube-api-access-6jr2q") pod "db2abb36-14d9-4986-ac53-33b66f1cc7df" (UID: "db2abb36-14d9-4986-ac53-33b66f1cc7df"). InnerVolumeSpecName "kube-api-access-6jr2q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.891623 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "db2abb36-14d9-4986-ac53-33b66f1cc7df" (UID: "db2abb36-14d9-4986-ac53-33b66f1cc7df"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.899127 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jr2q\" (UniqueName: \"kubernetes.io/projected/db2abb36-14d9-4986-ac53-33b66f1cc7df-kube-api-access-6jr2q\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.899180 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/db2abb36-14d9-4986-ac53-33b66f1cc7df-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.899195 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.899224 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.899254 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/db2abb36-14d9-4986-ac53-33b66f1cc7df-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.963115 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "db2abb36-14d9-4986-ac53-33b66f1cc7df" (UID: "db2abb36-14d9-4986-ac53-33b66f1cc7df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:38 crc kubenswrapper[4767]: I0128 18:52:38.997679 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-config-data" (OuterVolumeSpecName: "config-data") pod "db2abb36-14d9-4986-ac53-33b66f1cc7df" (UID: "db2abb36-14d9-4986-ac53-33b66f1cc7df"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.001424 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.001475 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/db2abb36-14d9-4986-ac53-33b66f1cc7df-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.480851 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"db2abb36-14d9-4986-ac53-33b66f1cc7df","Type":"ContainerDied","Data":"6e9c94e04b159a746a8585a4ff1a50690e3b72b687d19433010158c622f36488"} Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.480928 4767 scope.go:117] "RemoveContainer" containerID="44c4218df0574ae7390dda64aee8dc546eb72069757f4b9a981744349765d759" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.481132 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.517979 4767 scope.go:117] "RemoveContainer" containerID="bb0372dba2c29faaa0f6987682a8e5a1a5782a7a90b4452ebdf1bcd2bd1fdb27" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.540791 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.541750 4767 scope.go:117] "RemoveContainer" containerID="957325b13755878a391f879b403d2c4ef4819c99d1f10c3bc7a739be9e61876f" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.555243 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.596061 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:39 crc kubenswrapper[4767]: E0128 18:52:39.596829 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="proxy-httpd" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.596866 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="proxy-httpd" Jan 28 18:52:39 crc kubenswrapper[4767]: E0128 18:52:39.596905 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="ceilometer-notification-agent" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.596914 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="ceilometer-notification-agent" Jan 28 18:52:39 crc kubenswrapper[4767]: E0128 18:52:39.596937 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="ceilometer-central-agent" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.596949 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="ceilometer-central-agent" Jan 28 18:52:39 crc kubenswrapper[4767]: E0128 18:52:39.596960 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="sg-core" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.596969 4767 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="sg-core" Jan 28 18:52:39 crc kubenswrapper[4767]: E0128 18:52:39.596985 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb57e040-6449-4416-87a1-776751f75752" containerName="neutron-httpd" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.596999 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb57e040-6449-4416-87a1-776751f75752" containerName="neutron-httpd" Jan 28 18:52:39 crc kubenswrapper[4767]: E0128 18:52:39.597013 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb57e040-6449-4416-87a1-776751f75752" containerName="neutron-api" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.597024 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb57e040-6449-4416-87a1-776751f75752" containerName="neutron-api" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.597379 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb57e040-6449-4416-87a1-776751f75752" containerName="neutron-httpd" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.597403 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="ceilometer-central-agent" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.597415 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="proxy-httpd" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.597429 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="sg-core" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.597439 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb57e040-6449-4416-87a1-776751f75752" containerName="neutron-api" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.597449 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" containerName="ceilometer-notification-agent" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.613353 4767 scope.go:117] "RemoveContainer" containerID="281802edc7d67f57b6e7d97be4cd95200a92cd5bace9aa824a127dd49f65a901" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.622679 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.628113 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.628514 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.633459 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.718250 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jx6gx\" (UniqueName: \"kubernetes.io/projected/e755a14d-bf9b-454d-aab8-ca4c790a553a-kube-api-access-jx6gx\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.718333 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-scripts\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.718393 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.718412 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-config-data\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.718446 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.718487 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e755a14d-bf9b-454d-aab8-ca4c790a553a-run-httpd\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.718533 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e755a14d-bf9b-454d-aab8-ca4c790a553a-log-httpd\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.820630 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jx6gx\" (UniqueName: \"kubernetes.io/projected/e755a14d-bf9b-454d-aab8-ca4c790a553a-kube-api-access-jx6gx\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: 
I0128 18:52:39.821423 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-scripts\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.821528 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.821574 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-config-data\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.821638 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.821757 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e755a14d-bf9b-454d-aab8-ca4c790a553a-run-httpd\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.821844 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e755a14d-bf9b-454d-aab8-ca4c790a553a-log-httpd\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.822839 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e755a14d-bf9b-454d-aab8-ca4c790a553a-run-httpd\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.823773 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e755a14d-bf9b-454d-aab8-ca4c790a553a-log-httpd\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.828830 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-scripts\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.829961 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.831123 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-config-data\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.834634 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.848503 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jx6gx\" (UniqueName: \"kubernetes.io/projected/e755a14d-bf9b-454d-aab8-ca4c790a553a-kube-api-access-jx6gx\") pod \"ceilometer-0\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") " pod="openstack/ceilometer-0" Jan 28 18:52:39 crc kubenswrapper[4767]: I0128 18:52:39.957519 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:52:40 crc kubenswrapper[4767]: I0128 18:52:40.516943 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:40 crc kubenswrapper[4767]: W0128 18:52:40.526824 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode755a14d_bf9b_454d_aab8_ca4c790a553a.slice/crio-2c1e8c32829f1fd52ba42fbfebefb654e64cfb4ba9867a3a216e11461927e72c WatchSource:0}: Error finding container 2c1e8c32829f1fd52ba42fbfebefb654e64cfb4ba9867a3a216e11461927e72c: Status 404 returned error can't find the container with id 2c1e8c32829f1fd52ba42fbfebefb654e64cfb4ba9867a3a216e11461927e72c Jan 28 18:52:40 crc kubenswrapper[4767]: I0128 18:52:40.808770 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db2abb36-14d9-4986-ac53-33b66f1cc7df" path="/var/lib/kubelet/pods/db2abb36-14d9-4986-ac53-33b66f1cc7df/volumes" Jan 28 18:52:41 crc kubenswrapper[4767]: I0128 18:52:41.515043 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e755a14d-bf9b-454d-aab8-ca4c790a553a","Type":"ContainerStarted","Data":"bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03"} Jan 28 18:52:41 crc kubenswrapper[4767]: I0128 18:52:41.515560 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e755a14d-bf9b-454d-aab8-ca4c790a553a","Type":"ContainerStarted","Data":"2c1e8c32829f1fd52ba42fbfebefb654e64cfb4ba9867a3a216e11461927e72c"} Jan 28 18:52:42 crc kubenswrapper[4767]: I0128 18:52:42.716031 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:43 crc kubenswrapper[4767]: I0128 18:52:43.535863 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e755a14d-bf9b-454d-aab8-ca4c790a553a","Type":"ContainerStarted","Data":"732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd"} Jan 28 18:52:43 crc kubenswrapper[4767]: I0128 18:52:43.536300 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e755a14d-bf9b-454d-aab8-ca4c790a553a","Type":"ContainerStarted","Data":"cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab"} Jan 28 18:52:45 crc kubenswrapper[4767]: I0128 18:52:45.454935 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon 
Jan 28 18:52:45 crc kubenswrapper[4767]: I0128 18:52:45.455888 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 18:52:45 crc kubenswrapper[4767]: I0128 18:52:45.569538 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e755a14d-bf9b-454d-aab8-ca4c790a553a","Type":"ContainerStarted","Data":"567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6"}
Jan 28 18:52:45 crc kubenswrapper[4767]: I0128 18:52:45.569988 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 28 18:52:45 crc kubenswrapper[4767]: I0128 18:52:45.570070 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="ceilometer-notification-agent" containerID="cri-o://cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab" gracePeriod=30
Jan 28 18:52:45 crc kubenswrapper[4767]: I0128 18:52:45.570034 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="proxy-httpd" containerID="cri-o://567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6" gracePeriod=30
Jan 28 18:52:45 crc kubenswrapper[4767]: I0128 18:52:45.570051 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="sg-core" containerID="cri-o://732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd" gracePeriod=30
Jan 28 18:52:45 crc kubenswrapper[4767]: I0128 18:52:45.569976 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="ceilometer-central-agent" containerID="cri-o://bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03" gracePeriod=30
Jan 28 18:52:45 crc kubenswrapper[4767]: I0128 18:52:45.613202 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.346077901 podStartE2EDuration="6.613177084s" podCreationTimestamp="2026-01-28 18:52:39 +0000 UTC" firstStartedPulling="2026-01-28 18:52:40.530092269 +0000 UTC m=+1366.494275143" lastFinishedPulling="2026-01-28 18:52:44.797191452 +0000 UTC m=+1370.761374326" observedRunningTime="2026-01-28 18:52:45.600707322 +0000 UTC m=+1371.564890216" watchObservedRunningTime="2026-01-28 18:52:45.613177084 +0000 UTC m=+1371.577359958"
Jan 28 18:52:46 crc kubenswrapper[4767]: I0128 18:52:46.584226 4767 generic.go:334] "Generic (PLEG): container finished" podID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerID="567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6" exitCode=0
Jan 28 18:52:46 crc kubenswrapper[4767]: I0128 18:52:46.584752 4767 generic.go:334] "Generic (PLEG): container finished" podID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerID="732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd" exitCode=2
Jan 28 18:52:46 crc kubenswrapper[4767]: I0128 18:52:46.584762 4767 generic.go:334] "Generic (PLEG): container finished" podID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerID="cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab" exitCode=0
Jan 28 18:52:46 crc kubenswrapper[4767]: I0128 18:52:46.584278 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e755a14d-bf9b-454d-aab8-ca4c790a553a","Type":"ContainerDied","Data":"567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6"}
Jan 28 18:52:46 crc kubenswrapper[4767]: I0128 18:52:46.584799 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e755a14d-bf9b-454d-aab8-ca4c790a553a","Type":"ContainerDied","Data":"732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd"}
Jan 28 18:52:46 crc kubenswrapper[4767]: I0128 18:52:46.584818 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e755a14d-bf9b-454d-aab8-ca4c790a553a","Type":"ContainerDied","Data":"cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab"}
Jan 28 18:52:47 crc kubenswrapper[4767]: E0128 18:52:47.598559 4767 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcb607654_d519_4a0c_bd79_337c1340c237.slice/crio-conmon-c3a86a892dbd1b355c3a23b4a5db80f85ad44aec4988db387b065de363b1033e.scope\": RecentStats: unable to find data in memory cache]"
Jan 28 18:52:47 crc kubenswrapper[4767]: I0128 18:52:47.601093 4767 generic.go:334] "Generic (PLEG): container finished" podID="cb607654-d519-4a0c-bd79-337c1340c237" containerID="c3a86a892dbd1b355c3a23b4a5db80f85ad44aec4988db387b065de363b1033e" exitCode=0
Jan 28 18:52:47 crc kubenswrapper[4767]: I0128 18:52:47.601161 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-2lrw9" event={"ID":"cb607654-d519-4a0c-bd79-337c1340c237","Type":"ContainerDied","Data":"c3a86a892dbd1b355c3a23b4a5db80f85ad44aec4988db387b065de363b1033e"}
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.034633 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-2lrw9"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.147329 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mmf8d\" (UniqueName: \"kubernetes.io/projected/cb607654-d519-4a0c-bd79-337c1340c237-kube-api-access-mmf8d\") pod \"cb607654-d519-4a0c-bd79-337c1340c237\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") "
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.147561 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-config-data\") pod \"cb607654-d519-4a0c-bd79-337c1340c237\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") "
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.147608 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-scripts\") pod \"cb607654-d519-4a0c-bd79-337c1340c237\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") "
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.147747 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-combined-ca-bundle\") pod \"cb607654-d519-4a0c-bd79-337c1340c237\" (UID: \"cb607654-d519-4a0c-bd79-337c1340c237\") "
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.167466 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-scripts" (OuterVolumeSpecName: "scripts") pod "cb607654-d519-4a0c-bd79-337c1340c237" (UID: "cb607654-d519-4a0c-bd79-337c1340c237"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.167635 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb607654-d519-4a0c-bd79-337c1340c237-kube-api-access-mmf8d" (OuterVolumeSpecName: "kube-api-access-mmf8d") pod "cb607654-d519-4a0c-bd79-337c1340c237" (UID: "cb607654-d519-4a0c-bd79-337c1340c237"). InnerVolumeSpecName "kube-api-access-mmf8d". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.184271 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cb607654-d519-4a0c-bd79-337c1340c237" (UID: "cb607654-d519-4a0c-bd79-337c1340c237"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.185587 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-config-data" (OuterVolumeSpecName: "config-data") pod "cb607654-d519-4a0c-bd79-337c1340c237" (UID: "cb607654-d519-4a0c-bd79-337c1340c237"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.250328 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.250373 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.250385 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cb607654-d519-4a0c-bd79-337c1340c237-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.250400 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mmf8d\" (UniqueName: \"kubernetes.io/projected/cb607654-d519-4a0c-bd79-337c1340c237-kube-api-access-mmf8d\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.641536 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-2lrw9" event={"ID":"cb607654-d519-4a0c-bd79-337c1340c237","Type":"ContainerDied","Data":"68d26aba0511f43c29713a922bd11520816327bd41318ff607d319f3fee96c89"}
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.641983 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="68d26aba0511f43c29713a922bd11520816327bd41318ff607d319f3fee96c89"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.641903 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-2lrw9"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.813006 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:52:49 crc kubenswrapper[4767]: E0128 18:52:49.813503 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb607654-d519-4a0c-bd79-337c1340c237" containerName="nova-cell0-conductor-db-sync"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.813522 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb607654-d519-4a0c-bd79-337c1340c237" containerName="nova-cell0-conductor-db-sync"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.813719 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb607654-d519-4a0c-bd79-337c1340c237" containerName="nova-cell0-conductor-db-sync"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.820149 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.822802 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.823513 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-zmthd"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.830447 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.862961 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jwf6\" (UniqueName: \"kubernetes.io/projected/074dd138-8468-472b-a70e-e3792dd6a582-kube-api-access-5jwf6\") pod \"nova-cell0-conductor-0\" (UID: \"074dd138-8468-472b-a70e-e3792dd6a582\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.863056 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/074dd138-8468-472b-a70e-e3792dd6a582-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"074dd138-8468-472b-a70e-e3792dd6a582\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.863100 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/074dd138-8468-472b-a70e-e3792dd6a582-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"074dd138-8468-472b-a70e-e3792dd6a582\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.965746 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jwf6\" (UniqueName: \"kubernetes.io/projected/074dd138-8468-472b-a70e-e3792dd6a582-kube-api-access-5jwf6\") pod \"nova-cell0-conductor-0\" (UID: \"074dd138-8468-472b-a70e-e3792dd6a582\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.965850 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/074dd138-8468-472b-a70e-e3792dd6a582-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"074dd138-8468-472b-a70e-e3792dd6a582\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.965965 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/074dd138-8468-472b-a70e-e3792dd6a582-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"074dd138-8468-472b-a70e-e3792dd6a582\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.983798 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/074dd138-8468-472b-a70e-e3792dd6a582-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"074dd138-8468-472b-a70e-e3792dd6a582\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:52:49 crc kubenswrapper[4767]: I0128 18:52:49.988709 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jwf6\" (UniqueName: \"kubernetes.io/projected/074dd138-8468-472b-a70e-e3792dd6a582-kube-api-access-5jwf6\") pod \"nova-cell0-conductor-0\" (UID: \"074dd138-8468-472b-a70e-e3792dd6a582\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:49.998684 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/074dd138-8468-472b-a70e-e3792dd6a582-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"074dd138-8468-472b-a70e-e3792dd6a582\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.159108 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.269661 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.374347 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-config-data-custom\") pod \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") "
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.374526 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-config-data\") pod \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") "
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.374586 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-combined-ca-bundle\") pod \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") "
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.374671 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jns9\" (UniqueName: \"kubernetes.io/projected/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-kube-api-access-5jns9\") pod \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\" (UID: \"f385fd5b-9b44-4d64-b9a5-39ffddab1c34\") "
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.384487 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "f385fd5b-9b44-4d64-b9a5-39ffddab1c34" (UID: "f385fd5b-9b44-4d64-b9a5-39ffddab1c34"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.388548 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-kube-api-access-5jns9" (OuterVolumeSpecName: "kube-api-access-5jns9") pod "f385fd5b-9b44-4d64-b9a5-39ffddab1c34" (UID: "f385fd5b-9b44-4d64-b9a5-39ffddab1c34"). InnerVolumeSpecName "kube-api-access-5jns9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.426962 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f385fd5b-9b44-4d64-b9a5-39ffddab1c34" (UID: "f385fd5b-9b44-4d64-b9a5-39ffddab1c34"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.445884 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-config-data" (OuterVolumeSpecName: "config-data") pod "f385fd5b-9b44-4d64-b9a5-39ffddab1c34" (UID: "f385fd5b-9b44-4d64-b9a5-39ffddab1c34"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.477990 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.478533 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.478547 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jns9\" (UniqueName: \"kubernetes.io/projected/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-kube-api-access-5jns9\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.478556 4767 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/f385fd5b-9b44-4d64-b9a5-39ffddab1c34-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.653336 4767 generic.go:334] "Generic (PLEG): container finished" podID="f385fd5b-9b44-4d64-b9a5-39ffddab1c34" containerID="156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9" exitCode=137
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.653395 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/heat-cfnapi-56cbd47b65-wxqrt"
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.653394 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-56cbd47b65-wxqrt" event={"ID":"f385fd5b-9b44-4d64-b9a5-39ffddab1c34","Type":"ContainerDied","Data":"156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9"}
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.653529 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/heat-cfnapi-56cbd47b65-wxqrt" event={"ID":"f385fd5b-9b44-4d64-b9a5-39ffddab1c34","Type":"ContainerDied","Data":"5e91d3c559a8390fc0995ab85fb2831f2eb137bdb464a473fafe40a1bf2a4fd4"}
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.653555 4767 scope.go:117] "RemoveContainer" containerID="156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9"
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.685528 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.687134 4767 scope.go:117] "RemoveContainer" containerID="156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9"
Jan 28 18:52:50 crc kubenswrapper[4767]: E0128 18:52:50.687698 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9\": container with ID starting with 156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9 not found: ID does not exist" containerID="156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9"
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.687750 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9"} err="failed to get container status \"156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9\": rpc error: code = NotFound desc = could not find container \"156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9\": container with ID starting with 156f32a4aa2766efb3a37f1a4bcd7a08ed82c1ae32c6acfde60304c8915066d9 not found: ID does not exist"
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.703718 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-cfnapi-56cbd47b65-wxqrt"]
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.714535 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-cfnapi-56cbd47b65-wxqrt"]
Jan 28 18:52:50 crc kubenswrapper[4767]: I0128 18:52:50.808089 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f385fd5b-9b44-4d64-b9a5-39ffddab1c34" path="/var/lib/kubelet/pods/f385fd5b-9b44-4d64-b9a5-39ffddab1c34/volumes"
Jan 28 18:52:51 crc kubenswrapper[4767]: I0128 18:52:51.689455 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"074dd138-8468-472b-a70e-e3792dd6a582","Type":"ContainerStarted","Data":"45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a"}
Jan 28 18:52:51 crc kubenswrapper[4767]: I0128 18:52:51.689998 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"074dd138-8468-472b-a70e-e3792dd6a582","Type":"ContainerStarted","Data":"6e4803f4f31b16cfebb961efb23148db192fce93ce77933e370b67e06bb37fe4"}
Jan 28 18:52:51 crc kubenswrapper[4767]: I0128 18:52:51.716665 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.716641027 podStartE2EDuration="2.716641027s" podCreationTimestamp="2026-01-28 18:52:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:52:51.711259308 +0000 UTC m=+1377.675442192" watchObservedRunningTime="2026-01-28 18:52:51.716641027 +0000 UTC m=+1377.680823891"
Jan 28 18:52:52 crc kubenswrapper[4767]: I0128 18:52:52.698919 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.344331 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.450344 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-sg-core-conf-yaml\") pod \"e755a14d-bf9b-454d-aab8-ca4c790a553a\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") "
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.450430 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-scripts\") pod \"e755a14d-bf9b-454d-aab8-ca4c790a553a\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") "
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.450471 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jx6gx\" (UniqueName: \"kubernetes.io/projected/e755a14d-bf9b-454d-aab8-ca4c790a553a-kube-api-access-jx6gx\") pod \"e755a14d-bf9b-454d-aab8-ca4c790a553a\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") "
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.450644 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e755a14d-bf9b-454d-aab8-ca4c790a553a-run-httpd\") pod \"e755a14d-bf9b-454d-aab8-ca4c790a553a\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") "
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.450671 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e755a14d-bf9b-454d-aab8-ca4c790a553a-log-httpd\") pod \"e755a14d-bf9b-454d-aab8-ca4c790a553a\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") "
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.450721 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-combined-ca-bundle\") pod \"e755a14d-bf9b-454d-aab8-ca4c790a553a\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") "
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.450840 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-config-data\") pod \"e755a14d-bf9b-454d-aab8-ca4c790a553a\" (UID: \"e755a14d-bf9b-454d-aab8-ca4c790a553a\") "
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.451634 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e755a14d-bf9b-454d-aab8-ca4c790a553a-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e755a14d-bf9b-454d-aab8-ca4c790a553a" (UID: "e755a14d-bf9b-454d-aab8-ca4c790a553a"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.452314 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e755a14d-bf9b-454d-aab8-ca4c790a553a-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e755a14d-bf9b-454d-aab8-ca4c790a553a" (UID: "e755a14d-bf9b-454d-aab8-ca4c790a553a"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.458495 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e755a14d-bf9b-454d-aab8-ca4c790a553a-kube-api-access-jx6gx" (OuterVolumeSpecName: "kube-api-access-jx6gx") pod "e755a14d-bf9b-454d-aab8-ca4c790a553a" (UID: "e755a14d-bf9b-454d-aab8-ca4c790a553a"). InnerVolumeSpecName "kube-api-access-jx6gx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.459072 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-scripts" (OuterVolumeSpecName: "scripts") pod "e755a14d-bf9b-454d-aab8-ca4c790a553a" (UID: "e755a14d-bf9b-454d-aab8-ca4c790a553a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.493897 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e755a14d-bf9b-454d-aab8-ca4c790a553a" (UID: "e755a14d-bf9b-454d-aab8-ca4c790a553a"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.551329 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e755a14d-bf9b-454d-aab8-ca4c790a553a" (UID: "e755a14d-bf9b-454d-aab8-ca4c790a553a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.553177 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e755a14d-bf9b-454d-aab8-ca4c790a553a-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.553252 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e755a14d-bf9b-454d-aab8-ca4c790a553a-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.553269 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.553285 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.553297 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.553312 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jx6gx\" (UniqueName: \"kubernetes.io/projected/e755a14d-bf9b-454d-aab8-ca4c790a553a-kube-api-access-jx6gx\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.573152 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-config-data" (OuterVolumeSpecName: "config-data") pod "e755a14d-bf9b-454d-aab8-ca4c790a553a" (UID: "e755a14d-bf9b-454d-aab8-ca4c790a553a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.655139 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e755a14d-bf9b-454d-aab8-ca4c790a553a-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.712878 4767 generic.go:334] "Generic (PLEG): container finished" podID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerID="bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03" exitCode=0
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.714982 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.715116 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e755a14d-bf9b-454d-aab8-ca4c790a553a","Type":"ContainerDied","Data":"bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03"}
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.715261 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e755a14d-bf9b-454d-aab8-ca4c790a553a","Type":"ContainerDied","Data":"2c1e8c32829f1fd52ba42fbfebefb654e64cfb4ba9867a3a216e11461927e72c"}
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.715294 4767 scope.go:117] "RemoveContainer" containerID="567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.755481 4767 scope.go:117] "RemoveContainer" containerID="732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.768667 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.790604 4767 scope.go:117] "RemoveContainer" containerID="cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.791355 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.811294 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:52:53 crc kubenswrapper[4767]: E0128 18:52:53.811933 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="ceilometer-notification-agent"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.811951 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="ceilometer-notification-agent"
Jan 28 18:52:53 crc kubenswrapper[4767]: E0128 18:52:53.811962 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="proxy-httpd"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.811969 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="proxy-httpd"
Jan 28 18:52:53 crc kubenswrapper[4767]: E0128 18:52:53.811993 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="ceilometer-central-agent"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.812000 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="ceilometer-central-agent"
Jan 28 18:52:53 crc kubenswrapper[4767]: E0128 18:52:53.812028 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f385fd5b-9b44-4d64-b9a5-39ffddab1c34" containerName="heat-cfnapi"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.812042 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f385fd5b-9b44-4d64-b9a5-39ffddab1c34" containerName="heat-cfnapi"
Jan 28 18:52:53 crc kubenswrapper[4767]: E0128 18:52:53.812062 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="sg-core"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.812069 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="sg-core"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.812303 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="proxy-httpd"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.812323 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="ceilometer-central-agent"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.812341 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="ceilometer-notification-agent"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.812352 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f385fd5b-9b44-4d64-b9a5-39ffddab1c34" containerName="heat-cfnapi"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.812394 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" containerName="sg-core"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.815346 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.826625 4767 scope.go:117] "RemoveContainer" containerID="bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.826937 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.827132 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.845998 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.857913 4767 scope.go:117] "RemoveContainer" containerID="567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6"
Jan 28 18:52:53 crc kubenswrapper[4767]: E0128 18:52:53.858445 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6\": container with ID starting with 567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6 not found: ID does not exist" containerID="567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.858480 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6"} err="failed to get container status \"567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6\": rpc error: code = NotFound desc = could not find container \"567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6\": container with ID starting with 567e34a510554dcb895a358510d0ce815497ca01e3a9da5cb143a8db597cd4a6 not found: ID does not exist"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.858505 4767 scope.go:117] "RemoveContainer" containerID="732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd"
Jan 28 18:52:53 crc kubenswrapper[4767]: E0128 18:52:53.859601 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd\": container with ID starting with 732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd not found: ID does not exist" containerID="732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.859665 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd"} err="failed to get container status \"732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd\": rpc error: code = NotFound desc = could not find container \"732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd\": container with ID starting with 732ab4cf22e7356658fdbeee2b77ace028734e0e9b83a3d8e360e57374d338dd not found: ID does not exist"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.859689 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-log-httpd\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.859704 4767 scope.go:117] "RemoveContainer" containerID="cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.859769 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-scripts\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.859804 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4jvr\" (UniqueName: \"kubernetes.io/projected/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-kube-api-access-m4jvr\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0"
Jan 28 18:52:53 crc kubenswrapper[4767]: E0128 18:52:53.860054 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab\": container with ID starting with cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab not found: ID does not exist" containerID="cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.860078 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-run-httpd\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.860138 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0"
Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.860077 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab"} err="failed to get container status \"cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab\": rpc error: code = NotFound desc = could not find container \"cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab\": container with ID starting with cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab not found: ID does not exist"
containerID={"Type":"cri-o","ID":"cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab"} err="failed to get container status \"cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab\": rpc error: code = NotFound desc = could not find container \"cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab\": container with ID starting with cc48d81ff1fa0c72347d57c231efadc3bce7b42cf663c749e0d26a4ea0d019ab not found: ID does not exist" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.860285 4767 scope.go:117] "RemoveContainer" containerID="bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.860399 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.860516 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-config-data\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: E0128 18:52:53.862911 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03\": container with ID starting with bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03 not found: ID does not exist" containerID="bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.862948 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03"} err="failed to get container status \"bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03\": rpc error: code = NotFound desc = could not find container \"bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03\": container with ID starting with bed8241bae66e5a7f54dc563b1badebba4f6acece853fa8ec5fcdfdd10636a03 not found: ID does not exist" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.962715 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-run-httpd\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.962764 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.962817 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc 
kubenswrapper[4767]: I0128 18:52:53.962877 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-config-data\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.962922 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-log-httpd\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.962974 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-scripts\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.963002 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4jvr\" (UniqueName: \"kubernetes.io/projected/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-kube-api-access-m4jvr\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.963461 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-run-httpd\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.964528 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-log-httpd\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.969847 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-scripts\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.972462 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.973987 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-config-data\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.975364 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:53 crc kubenswrapper[4767]: I0128 18:52:53.983080 4767 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-m4jvr\" (UniqueName: \"kubernetes.io/projected/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-kube-api-access-m4jvr\") pod \"ceilometer-0\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " pod="openstack/ceilometer-0" Jan 28 18:52:54 crc kubenswrapper[4767]: I0128 18:52:54.148894 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:52:54 crc kubenswrapper[4767]: I0128 18:52:54.655332 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:52:54 crc kubenswrapper[4767]: I0128 18:52:54.731928 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8","Type":"ContainerStarted","Data":"fc7ebda56fa089a4d90d3e7efbc7021abdd31e61e3b292761bb444d6135743ba"} Jan 28 18:52:54 crc kubenswrapper[4767]: I0128 18:52:54.812268 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e755a14d-bf9b-454d-aab8-ca4c790a553a" path="/var/lib/kubelet/pods/e755a14d-bf9b-454d-aab8-ca4c790a553a/volumes" Jan 28 18:52:55 crc kubenswrapper[4767]: I0128 18:52:55.199182 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 28 18:52:55 crc kubenswrapper[4767]: I0128 18:52:55.724147 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-8x72k"] Jan 28 18:52:55 crc kubenswrapper[4767]: I0128 18:52:55.726111 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:55 crc kubenswrapper[4767]: I0128 18:52:55.736242 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Jan 28 18:52:55 crc kubenswrapper[4767]: I0128 18:52:55.739553 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Jan 28 18:52:55 crc kubenswrapper[4767]: I0128 18:52:55.739908 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-8x72k"] Jan 28 18:52:55 crc kubenswrapper[4767]: I0128 18:52:55.746165 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8","Type":"ContainerStarted","Data":"4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95"} Jan 28 18:52:55 crc kubenswrapper[4767]: I0128 18:52:55.913976 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qh9qf\" (UniqueName: \"kubernetes.io/projected/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-kube-api-access-qh9qf\") pod \"nova-cell0-cell-mapping-8x72k\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:55 crc kubenswrapper[4767]: I0128 18:52:55.914050 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-scripts\") pod \"nova-cell0-cell-mapping-8x72k\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:55 crc kubenswrapper[4767]: I0128 18:52:55.914105 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-config-data\") pod 
\"nova-cell0-cell-mapping-8x72k\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:55 crc kubenswrapper[4767]: I0128 18:52:55.914684 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-8x72k\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.019741 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-8x72k\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.019870 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qh9qf\" (UniqueName: \"kubernetes.io/projected/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-kube-api-access-qh9qf\") pod \"nova-cell0-cell-mapping-8x72k\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.019903 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-scripts\") pod \"nova-cell0-cell-mapping-8x72k\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.019952 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-config-data\") pod \"nova-cell0-cell-mapping-8x72k\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.029491 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-8x72k\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.051469 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-scripts\") pod \"nova-cell0-cell-mapping-8x72k\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.061030 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-config-data\") pod \"nova-cell0-cell-mapping-8x72k\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.146980 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qh9qf\" (UniqueName: \"kubernetes.io/projected/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-kube-api-access-qh9qf\") pod \"nova-cell0-cell-mapping-8x72k\" (UID: 
\"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.229715 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.231474 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.270961 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.331515 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-logs\") pod \"nova-api-0\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") " pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.331592 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-config-data\") pod \"nova-api-0\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") " pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.331634 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") " pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.331777 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m6wqc\" (UniqueName: \"kubernetes.io/projected/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-kube-api-access-m6wqc\") pod \"nova-api-0\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") " pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.351032 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.387888 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.389552 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.403660 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.426560 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.442282 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28e9a69e-421c-41d9-b00d-dcdcea88b828-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"28e9a69e-421c-41d9-b00d-dcdcea88b828\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.442598 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m6wqc\" (UniqueName: \"kubernetes.io/projected/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-kube-api-access-m6wqc\") pod \"nova-api-0\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") " pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.442647 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-56rf2\" (UniqueName: \"kubernetes.io/projected/28e9a69e-421c-41d9-b00d-dcdcea88b828-kube-api-access-56rf2\") pod \"nova-cell1-novncproxy-0\" (UID: \"28e9a69e-421c-41d9-b00d-dcdcea88b828\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.442963 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-logs\") pod \"nova-api-0\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") " pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.443034 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28e9a69e-421c-41d9-b00d-dcdcea88b828-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"28e9a69e-421c-41d9-b00d-dcdcea88b828\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.443055 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-config-data\") pod \"nova-api-0\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") " pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.443103 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") " pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.445433 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-logs\") pod \"nova-api-0\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") " pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.483437 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.533547 4767 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") " pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.539582 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-config-data\") pod \"nova-api-0\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") " pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.540559 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.543113 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.544436 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m6wqc\" (UniqueName: \"kubernetes.io/projected/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-kube-api-access-m6wqc\") pod \"nova-api-0\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") " pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.553154 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.555083 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28e9a69e-421c-41d9-b00d-dcdcea88b828-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"28e9a69e-421c-41d9-b00d-dcdcea88b828\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.555365 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28e9a69e-421c-41d9-b00d-dcdcea88b828-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"28e9a69e-421c-41d9-b00d-dcdcea88b828\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.555630 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-56rf2\" (UniqueName: \"kubernetes.io/projected/28e9a69e-421c-41d9-b00d-dcdcea88b828-kube-api-access-56rf2\") pod \"nova-cell1-novncproxy-0\" (UID: \"28e9a69e-421c-41d9-b00d-dcdcea88b828\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.588480 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28e9a69e-421c-41d9-b00d-dcdcea88b828-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"28e9a69e-421c-41d9-b00d-dcdcea88b828\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.589005 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.599070 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28e9a69e-421c-41d9-b00d-dcdcea88b828-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"28e9a69e-421c-41d9-b00d-dcdcea88b828\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.643887 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-56rf2\" (UniqueName: \"kubernetes.io/projected/28e9a69e-421c-41d9-b00d-dcdcea88b828-kube-api-access-56rf2\") pod \"nova-cell1-novncproxy-0\" (UID: \"28e9a69e-421c-41d9-b00d-dcdcea88b828\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.663480 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x64tx\" (UniqueName: \"kubernetes.io/projected/5c6f4dc4-6288-497b-b884-985966e6c5df-kube-api-access-x64tx\") pod \"nova-metadata-0\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") " pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.664088 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c6f4dc4-6288-497b-b884-985966e6c5df-config-data\") pod \"nova-metadata-0\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") " pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.664418 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c6f4dc4-6288-497b-b884-985966e6c5df-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") " pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.664552 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c6f4dc4-6288-497b-b884-985966e6c5df-logs\") pod \"nova-metadata-0\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") " pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.666692 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.668509 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.670961 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.694257 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.713356 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.734874 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.757541 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-8ntj2"] Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.759715 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.767122 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b762052-77b4-487b-9dfc-479e0478cd58-config-data\") pod \"nova-scheduler-0\" (UID: \"7b762052-77b4-487b-9dfc-479e0478cd58\") " pod="openstack/nova-scheduler-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.767288 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c6f4dc4-6288-497b-b884-985966e6c5df-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") " pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.767351 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c6f4dc4-6288-497b-b884-985966e6c5df-logs\") pod \"nova-metadata-0\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") " pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.767414 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b762052-77b4-487b-9dfc-479e0478cd58-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7b762052-77b4-487b-9dfc-479e0478cd58\") " pod="openstack/nova-scheduler-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.767588 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x64tx\" (UniqueName: \"kubernetes.io/projected/5c6f4dc4-6288-497b-b884-985966e6c5df-kube-api-access-x64tx\") pod \"nova-metadata-0\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") " pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.767641 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7cs8f\" (UniqueName: \"kubernetes.io/projected/7b762052-77b4-487b-9dfc-479e0478cd58-kube-api-access-7cs8f\") pod \"nova-scheduler-0\" (UID: \"7b762052-77b4-487b-9dfc-479e0478cd58\") " pod="openstack/nova-scheduler-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.767701 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c6f4dc4-6288-497b-b884-985966e6c5df-config-data\") pod \"nova-metadata-0\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") " pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.768841 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c6f4dc4-6288-497b-b884-985966e6c5df-logs\") pod \"nova-metadata-0\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") " pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.815097 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/5c6f4dc4-6288-497b-b884-985966e6c5df-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") " pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.815238 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c6f4dc4-6288-497b-b884-985966e6c5df-config-data\") pod \"nova-metadata-0\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") " pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.816546 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x64tx\" (UniqueName: \"kubernetes.io/projected/5c6f4dc4-6288-497b-b884-985966e6c5df-kube-api-access-x64tx\") pod \"nova-metadata-0\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") " pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.869328 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-config\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.869844 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7cs8f\" (UniqueName: \"kubernetes.io/projected/7b762052-77b4-487b-9dfc-479e0478cd58-kube-api-access-7cs8f\") pod \"nova-scheduler-0\" (UID: \"7b762052-77b4-487b-9dfc-479e0478cd58\") " pod="openstack/nova-scheduler-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.869956 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wc9cv\" (UniqueName: \"kubernetes.io/projected/6b257523-2a53-41a6-92a5-a2e7467b4851-kube-api-access-wc9cv\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.870010 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b762052-77b4-487b-9dfc-479e0478cd58-config-data\") pod \"nova-scheduler-0\" (UID: \"7b762052-77b4-487b-9dfc-479e0478cd58\") " pod="openstack/nova-scheduler-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.870066 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-dns-swift-storage-0\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.870092 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-ovsdbserver-sb\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.870144 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-ovsdbserver-nb\") 
pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.870164 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-dns-svc\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.870187 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-8ntj2"] Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.870273 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b762052-77b4-487b-9dfc-479e0478cd58-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7b762052-77b4-487b-9dfc-479e0478cd58\") " pod="openstack/nova-scheduler-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.877899 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b762052-77b4-487b-9dfc-479e0478cd58-config-data\") pod \"nova-scheduler-0\" (UID: \"7b762052-77b4-487b-9dfc-479e0478cd58\") " pod="openstack/nova-scheduler-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.919753 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b762052-77b4-487b-9dfc-479e0478cd58-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7b762052-77b4-487b-9dfc-479e0478cd58\") " pod="openstack/nova-scheduler-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.941741 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7cs8f\" (UniqueName: \"kubernetes.io/projected/7b762052-77b4-487b-9dfc-479e0478cd58-kube-api-access-7cs8f\") pod \"nova-scheduler-0\" (UID: \"7b762052-77b4-487b-9dfc-479e0478cd58\") " pod="openstack/nova-scheduler-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.949137 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.981261 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-config\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.981397 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wc9cv\" (UniqueName: \"kubernetes.io/projected/6b257523-2a53-41a6-92a5-a2e7467b4851-kube-api-access-wc9cv\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.981471 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-dns-swift-storage-0\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.981504 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-ovsdbserver-sb\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.981557 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-ovsdbserver-nb\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.981581 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-dns-svc\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.983307 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-dns-swift-storage-0\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.985945 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-ovsdbserver-sb\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:56 crc kubenswrapper[4767]: I0128 18:52:56.994702 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-ovsdbserver-nb\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:57 
crc kubenswrapper[4767]: I0128 18:52:57.000052 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-dns-svc\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:57 crc kubenswrapper[4767]: I0128 18:52:57.000647 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-config\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:57 crc kubenswrapper[4767]: I0128 18:52:57.013402 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 18:52:57 crc kubenswrapper[4767]: I0128 18:52:57.027151 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wc9cv\" (UniqueName: \"kubernetes.io/projected/6b257523-2a53-41a6-92a5-a2e7467b4851-kube-api-access-wc9cv\") pod \"dnsmasq-dns-568d7fd7cf-8ntj2\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:57 crc kubenswrapper[4767]: I0128 18:52:57.101047 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:52:57 crc kubenswrapper[4767]: I0128 18:52:57.254594 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-8x72k"] Jan 28 18:52:57 crc kubenswrapper[4767]: W0128 18:52:57.300277 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf9f9d061_f0ef_4bb6_8b3c_82838fedc33f.slice/crio-2170ef28c9162a00fa6c6e4552be38e4f50b310bd984c20407c5fb9469d4f7d6 WatchSource:0}: Error finding container 2170ef28c9162a00fa6c6e4552be38e4f50b310bd984c20407c5fb9469d4f7d6: Status 404 returned error can't find the container with id 2170ef28c9162a00fa6c6e4552be38e4f50b310bd984c20407c5fb9469d4f7d6 Jan 28 18:52:57 crc kubenswrapper[4767]: I0128 18:52:57.460101 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:52:57 crc kubenswrapper[4767]: W0128 18:52:57.897012 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod28e9a69e_421c_41d9_b00d_dcdcea88b828.slice/crio-8d4fd1ac2f11e38b611980d762b5f816c4d330f3923582c20ebb1c04eb231828 WatchSource:0}: Error finding container 8d4fd1ac2f11e38b611980d762b5f816c4d330f3923582c20ebb1c04eb231828: Status 404 returned error can't find the container with id 8d4fd1ac2f11e38b611980d762b5f816c4d330f3923582c20ebb1c04eb231828 Jan 28 18:52:57 crc kubenswrapper[4767]: I0128 18:52:57.902310 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a7ad1b44-d718-494d-8a8e-e09d2c7f306a","Type":"ContainerStarted","Data":"29b8ff59205f77b07424f95a56359d5bea97d257d897fa31a4355fcdf1052f89"} Jan 28 18:52:57 crc kubenswrapper[4767]: I0128 18:52:57.934280 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 18:52:57 crc kubenswrapper[4767]: I0128 18:52:57.950785 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8","Type":"ContainerStarted","Data":"dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec"} Jan 28 18:52:57 crc kubenswrapper[4767]: I0128 18:52:57.971422 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-8x72k" event={"ID":"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f","Type":"ContainerStarted","Data":"b8eea234273b2b3910c136fb5ad82f22bb028b9f1b75f8ec921682db0abe37ed"} Jan 28 18:52:57 crc kubenswrapper[4767]: I0128 18:52:57.974484 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-8x72k" event={"ID":"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f","Type":"ContainerStarted","Data":"2170ef28c9162a00fa6c6e4552be38e4f50b310bd984c20407c5fb9469d4f7d6"} Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.024974 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:52:58 crc kubenswrapper[4767]: W0128 18:52:58.026419 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5c6f4dc4_6288_497b_b884_985966e6c5df.slice/crio-e1b4b52b03a8c0fe2bbcc20a165500a396723844f81f4c39c871ada18d759a88 WatchSource:0}: Error finding container e1b4b52b03a8c0fe2bbcc20a165500a396723844f81f4c39c871ada18d759a88: Status 404 returned error can't find the container with id e1b4b52b03a8c0fe2bbcc20a165500a396723844f81f4c39c871ada18d759a88 Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.094356 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.103630 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-8x72k" podStartSLOduration=3.103605478 podStartE2EDuration="3.103605478s" podCreationTimestamp="2026-01-28 18:52:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:52:58.003815396 +0000 UTC m=+1383.967998270" watchObservedRunningTime="2026-01-28 18:52:58.103605478 +0000 UTC m=+1384.067788352" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.216758 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-8ntj2"] Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.317962 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-t8vsr"] Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.319520 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.324849 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.325097 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.331119 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-t8vsr"] Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.484435 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-t8vsr\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.484513 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsr9b\" (UniqueName: \"kubernetes.io/projected/bd968858-329e-4d57-8cd7-364a5e852eea-kube-api-access-rsr9b\") pod \"nova-cell1-conductor-db-sync-t8vsr\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.484699 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-scripts\") pod \"nova-cell1-conductor-db-sync-t8vsr\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.484843 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-config-data\") pod \"nova-cell1-conductor-db-sync-t8vsr\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.588019 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-t8vsr\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.588667 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsr9b\" (UniqueName: \"kubernetes.io/projected/bd968858-329e-4d57-8cd7-364a5e852eea-kube-api-access-rsr9b\") pod \"nova-cell1-conductor-db-sync-t8vsr\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.588754 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-scripts\") pod \"nova-cell1-conductor-db-sync-t8vsr\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.588823 4767 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-config-data\") pod \"nova-cell1-conductor-db-sync-t8vsr\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.598621 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-scripts\") pod \"nova-cell1-conductor-db-sync-t8vsr\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.599370 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-config-data\") pod \"nova-cell1-conductor-db-sync-t8vsr\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.605887 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-t8vsr\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.608845 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsr9b\" (UniqueName: \"kubernetes.io/projected/bd968858-329e-4d57-8cd7-364a5e852eea-kube-api-access-rsr9b\") pod \"nova-cell1-conductor-db-sync-t8vsr\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:58 crc kubenswrapper[4767]: I0128 18:52:58.659039 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:52:59 crc kubenswrapper[4767]: I0128 18:52:59.007611 4767 generic.go:334] "Generic (PLEG): container finished" podID="6b257523-2a53-41a6-92a5-a2e7467b4851" containerID="aead0fa7509e6af01dd88ac772fb037f232d2e3963a51413da8088a42415dc2a" exitCode=0 Jan 28 18:52:59 crc kubenswrapper[4767]: I0128 18:52:59.008179 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" event={"ID":"6b257523-2a53-41a6-92a5-a2e7467b4851","Type":"ContainerDied","Data":"aead0fa7509e6af01dd88ac772fb037f232d2e3963a51413da8088a42415dc2a"} Jan 28 18:52:59 crc kubenswrapper[4767]: I0128 18:52:59.008345 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" event={"ID":"6b257523-2a53-41a6-92a5-a2e7467b4851","Type":"ContainerStarted","Data":"0f187c343e67431bd8d5d750fe90aa1c8ec742ed6b2a67d549c1a0110f5c9f6f"} Jan 28 18:52:59 crc kubenswrapper[4767]: I0128 18:52:59.026906 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"28e9a69e-421c-41d9-b00d-dcdcea88b828","Type":"ContainerStarted","Data":"8d4fd1ac2f11e38b611980d762b5f816c4d330f3923582c20ebb1c04eb231828"} Jan 28 18:52:59 crc kubenswrapper[4767]: I0128 18:52:59.054870 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8","Type":"ContainerStarted","Data":"74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c"} Jan 28 18:52:59 crc kubenswrapper[4767]: I0128 18:52:59.056787 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7b762052-77b4-487b-9dfc-479e0478cd58","Type":"ContainerStarted","Data":"761688b15b3a8dd6a74d6057d536368c53a0d4c22c0232420cfbf6fa1109b6df"} Jan 28 18:52:59 crc kubenswrapper[4767]: I0128 18:52:59.060022 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5c6f4dc4-6288-497b-b884-985966e6c5df","Type":"ContainerStarted","Data":"e1b4b52b03a8c0fe2bbcc20a165500a396723844f81f4c39c871ada18d759a88"} Jan 28 18:52:59 crc kubenswrapper[4767]: I0128 18:52:59.309038 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-t8vsr"] Jan 28 18:53:00 crc kubenswrapper[4767]: I0128 18:53:00.086429 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-t8vsr" event={"ID":"bd968858-329e-4d57-8cd7-364a5e852eea","Type":"ContainerStarted","Data":"022d4cc40e71f52a4865efd9da768743d6a198fc73e69d953171c2b293add2a7"} Jan 28 18:53:00 crc kubenswrapper[4767]: I0128 18:53:00.098470 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" event={"ID":"6b257523-2a53-41a6-92a5-a2e7467b4851","Type":"ContainerStarted","Data":"297ee69c14da0819d35c485005a508940e179ff9de18f00bbed9f0aab99405af"} Jan 28 18:53:00 crc kubenswrapper[4767]: I0128 18:53:00.098985 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:53:00 crc kubenswrapper[4767]: I0128 18:53:00.122941 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:53:00 crc kubenswrapper[4767]: I0128 18:53:00.133724 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" podStartSLOduration=4.133701757 podStartE2EDuration="4.133701757s" 
podCreationTimestamp="2026-01-28 18:52:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:00.130960981 +0000 UTC m=+1386.095143855" watchObservedRunningTime="2026-01-28 18:53:00.133701757 +0000 UTC m=+1386.097884621" Jan 28 18:53:00 crc kubenswrapper[4767]: I0128 18:53:00.170955 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 18:53:00 crc kubenswrapper[4767]: I0128 18:53:00.670562 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:00 crc kubenswrapper[4767]: I0128 18:53:00.767469 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 28 18:53:00 crc kubenswrapper[4767]: I0128 18:53:00.767959 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="074dd138-8468-472b-a70e-e3792dd6a582" containerName="nova-cell0-conductor-conductor" containerID="cri-o://45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a" gracePeriod=30 Jan 28 18:53:00 crc kubenswrapper[4767]: I0128 18:53:00.823857 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:53:02 crc kubenswrapper[4767]: I0128 18:53:02.140513 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-t8vsr" event={"ID":"bd968858-329e-4d57-8cd7-364a5e852eea","Type":"ContainerStarted","Data":"e073f7a0342841c72e27008fe373a0b937c630b20a94bc7a967404b5f9f4202f"} Jan 28 18:53:02 crc kubenswrapper[4767]: I0128 18:53:02.197791 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-t8vsr" podStartSLOduration=4.197767255 podStartE2EDuration="4.197767255s" podCreationTimestamp="2026-01-28 18:52:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:02.177193818 +0000 UTC m=+1388.141376702" watchObservedRunningTime="2026-01-28 18:53:02.197767255 +0000 UTC m=+1388.161950139" Jan 28 18:53:02 crc kubenswrapper[4767]: I0128 18:53:02.606689 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:53:03 crc kubenswrapper[4767]: I0128 18:53:03.992018 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.068154 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/074dd138-8468-472b-a70e-e3792dd6a582-combined-ca-bundle\") pod \"074dd138-8468-472b-a70e-e3792dd6a582\" (UID: \"074dd138-8468-472b-a70e-e3792dd6a582\") " Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.068251 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jwf6\" (UniqueName: \"kubernetes.io/projected/074dd138-8468-472b-a70e-e3792dd6a582-kube-api-access-5jwf6\") pod \"074dd138-8468-472b-a70e-e3792dd6a582\" (UID: \"074dd138-8468-472b-a70e-e3792dd6a582\") " Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.068454 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/074dd138-8468-472b-a70e-e3792dd6a582-config-data\") pod \"074dd138-8468-472b-a70e-e3792dd6a582\" (UID: \"074dd138-8468-472b-a70e-e3792dd6a582\") " Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.087151 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/074dd138-8468-472b-a70e-e3792dd6a582-kube-api-access-5jwf6" (OuterVolumeSpecName: "kube-api-access-5jwf6") pod "074dd138-8468-472b-a70e-e3792dd6a582" (UID: "074dd138-8468-472b-a70e-e3792dd6a582"). InnerVolumeSpecName "kube-api-access-5jwf6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.121680 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/074dd138-8468-472b-a70e-e3792dd6a582-config-data" (OuterVolumeSpecName: "config-data") pod "074dd138-8468-472b-a70e-e3792dd6a582" (UID: "074dd138-8468-472b-a70e-e3792dd6a582"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.160446 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/074dd138-8468-472b-a70e-e3792dd6a582-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "074dd138-8468-472b-a70e-e3792dd6a582" (UID: "074dd138-8468-472b-a70e-e3792dd6a582"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.171881 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/074dd138-8468-472b-a70e-e3792dd6a582-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.171934 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/074dd138-8468-472b-a70e-e3792dd6a582-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.171954 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jwf6\" (UniqueName: \"kubernetes.io/projected/074dd138-8468-472b-a70e-e3792dd6a582-kube-api-access-5jwf6\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.175710 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a7ad1b44-d718-494d-8a8e-e09d2c7f306a","Type":"ContainerStarted","Data":"37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5"} Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.182129 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8","Type":"ContainerStarted","Data":"adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406"} Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.182424 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="ceilometer-central-agent" containerID="cri-o://4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95" gracePeriod=30 Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.182515 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.182986 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="proxy-httpd" containerID="cri-o://adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406" gracePeriod=30 Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.183042 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="sg-core" containerID="cri-o://74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c" gracePeriod=30 Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.183093 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="ceilometer-notification-agent" containerID="cri-o://dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec" gracePeriod=30 Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.189777 4767 generic.go:334] "Generic (PLEG): container finished" podID="074dd138-8468-472b-a70e-e3792dd6a582" containerID="45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a" exitCode=0 Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.189905 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" 
event={"ID":"074dd138-8468-472b-a70e-e3792dd6a582","Type":"ContainerDied","Data":"45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a"} Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.189939 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"074dd138-8468-472b-a70e-e3792dd6a582","Type":"ContainerDied","Data":"6e4803f4f31b16cfebb961efb23148db192fce93ce77933e370b67e06bb37fe4"} Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.189958 4767 scope.go:117] "RemoveContainer" containerID="45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a" Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.190126 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.199421 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5c6f4dc4-6288-497b-b884-985966e6c5df","Type":"ContainerStarted","Data":"4f4cfda14a440771fc4421ab49376af367ae855457f8f61770c279422f387af4"} Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.202355 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"28e9a69e-421c-41d9-b00d-dcdcea88b828","Type":"ContainerStarted","Data":"f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d"} Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.202477 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="28e9a69e-421c-41d9-b00d-dcdcea88b828" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d" gracePeriod=30 Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.215934 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.403357034 podStartE2EDuration="11.215903149s" podCreationTimestamp="2026-01-28 18:52:53 +0000 UTC" firstStartedPulling="2026-01-28 18:52:54.665774133 +0000 UTC m=+1380.629957007" lastFinishedPulling="2026-01-28 18:53:03.478320248 +0000 UTC m=+1389.442503122" observedRunningTime="2026-01-28 18:53:04.209961442 +0000 UTC m=+1390.174144316" watchObservedRunningTime="2026-01-28 18:53:04.215903149 +0000 UTC m=+1390.180086023" Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.296244 4767 scope.go:117] "RemoveContainer" containerID="45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a" Jan 28 18:53:04 crc kubenswrapper[4767]: E0128 18:53:04.297057 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a\": container with ID starting with 45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a not found: ID does not exist" containerID="45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a" Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.297147 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a"} err="failed to get container status \"45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a\": rpc error: code = NotFound desc = could not find container \"45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a\": container with ID starting 
with 45ecae17857bfbd142c96c4f8d3fcedcd09169d7dba7c5396c205be291fd7e5a not found: ID does not exist"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.342138 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.8056683639999997 podStartE2EDuration="8.34210344s" podCreationTimestamp="2026-01-28 18:52:56 +0000 UTC" firstStartedPulling="2026-01-28 18:52:57.936888415 +0000 UTC m=+1383.901071289" lastFinishedPulling="2026-01-28 18:53:03.473323491 +0000 UTC m=+1389.437506365" observedRunningTime="2026-01-28 18:53:04.261927514 +0000 UTC m=+1390.226110388" watchObservedRunningTime="2026-01-28 18:53:04.34210344 +0000 UTC m=+1390.306286314"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.369836 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.425337 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.452422 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:53:04 crc kubenswrapper[4767]: E0128 18:53:04.453109 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="074dd138-8468-472b-a70e-e3792dd6a582" containerName="nova-cell0-conductor-conductor"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.453135 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="074dd138-8468-472b-a70e-e3792dd6a582" containerName="nova-cell0-conductor-conductor"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.453471 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="074dd138-8468-472b-a70e-e3792dd6a582" containerName="nova-cell0-conductor-conductor"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.454477 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.472155 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.473085 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.548392 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:53:04 crc kubenswrapper[4767]: E0128 18:53:04.549449 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle config-data kube-api-access-spvxv], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/nova-cell0-conductor-0" podUID="062dce82-9296-48da-a29d-2f42ff09e306"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.607533 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spvxv\" (UniqueName: \"kubernetes.io/projected/062dce82-9296-48da-a29d-2f42ff09e306-kube-api-access-spvxv\") pod \"nova-cell0-conductor-0\" (UID: \"062dce82-9296-48da-a29d-2f42ff09e306\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.612536 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/062dce82-9296-48da-a29d-2f42ff09e306-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"062dce82-9296-48da-a29d-2f42ff09e306\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.612642 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/062dce82-9296-48da-a29d-2f42ff09e306-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"062dce82-9296-48da-a29d-2f42ff09e306\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.715812 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spvxv\" (UniqueName: \"kubernetes.io/projected/062dce82-9296-48da-a29d-2f42ff09e306-kube-api-access-spvxv\") pod \"nova-cell0-conductor-0\" (UID: \"062dce82-9296-48da-a29d-2f42ff09e306\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.716301 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/062dce82-9296-48da-a29d-2f42ff09e306-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"062dce82-9296-48da-a29d-2f42ff09e306\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.716486 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/062dce82-9296-48da-a29d-2f42ff09e306-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"062dce82-9296-48da-a29d-2f42ff09e306\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.726708 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/062dce82-9296-48da-a29d-2f42ff09e306-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"062dce82-9296-48da-a29d-2f42ff09e306\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.727409 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/062dce82-9296-48da-a29d-2f42ff09e306-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"062dce82-9296-48da-a29d-2f42ff09e306\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.736487 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spvxv\" (UniqueName: \"kubernetes.io/projected/062dce82-9296-48da-a29d-2f42ff09e306-kube-api-access-spvxv\") pod \"nova-cell0-conductor-0\" (UID: \"062dce82-9296-48da-a29d-2f42ff09e306\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:04 crc kubenswrapper[4767]: I0128 18:53:04.809494 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="074dd138-8468-472b-a70e-e3792dd6a582" path="/var/lib/kubelet/pods/074dd138-8468-472b-a70e-e3792dd6a582/volumes"
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.216989 4767 generic.go:334] "Generic (PLEG): container finished" podID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerID="74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c" exitCode=2
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.218619 4767 generic.go:334] "Generic (PLEG): container finished" podID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerID="dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec" exitCode=0
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.217226 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8","Type":"ContainerDied","Data":"74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c"}
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.218938 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8","Type":"ContainerDied","Data":"dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec"}
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.223064 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7b762052-77b4-487b-9dfc-479e0478cd58","Type":"ContainerStarted","Data":"4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d"}
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.223404 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="7b762052-77b4-487b-9dfc-479e0478cd58" containerName="nova-scheduler-scheduler" containerID="cri-o://4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d" gracePeriod=30
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.234607 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5c6f4dc4-6288-497b-b884-985966e6c5df","Type":"ContainerStarted","Data":"8db0668610eaa32477d70599ebb733ed86568f2c5ba6fd4ee24bbb812d918944"}
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.235249 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="5c6f4dc4-6288-497b-b884-985966e6c5df" containerName="nova-metadata-metadata" containerID="cri-o://8db0668610eaa32477d70599ebb733ed86568f2c5ba6fd4ee24bbb812d918944" gracePeriod=30
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.235917 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="5c6f4dc4-6288-497b-b884-985966e6c5df" containerName="nova-metadata-log" containerID="cri-o://4f4cfda14a440771fc4421ab49376af367ae855457f8f61770c279422f387af4" gracePeriod=30
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.244120 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.244249 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a7ad1b44-d718-494d-8a8e-e09d2c7f306a","Type":"ContainerStarted","Data":"a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615"}
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.244357 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a7ad1b44-d718-494d-8a8e-e09d2c7f306a" containerName="nova-api-log" containerID="cri-o://37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5" gracePeriod=30
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.245296 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="a7ad1b44-d718-494d-8a8e-e09d2c7f306a" containerName="nova-api-api" containerID="cri-o://a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615" gracePeriod=30
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.263153 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.289333 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.866655676 podStartE2EDuration="9.289301931s" podCreationTimestamp="2026-01-28 18:52:56 +0000 UTC" firstStartedPulling="2026-01-28 18:52:58.049756798 +0000 UTC m=+1384.013939672" lastFinishedPulling="2026-01-28 18:53:03.472403053 +0000 UTC m=+1389.436585927" observedRunningTime="2026-01-28 18:53:05.277311594 +0000 UTC m=+1391.241494488" watchObservedRunningTime="2026-01-28 18:53:05.289301931 +0000 UTC m=+1391.253484805"
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.320377 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.886793409 podStartE2EDuration="9.320342175s" podCreationTimestamp="2026-01-28 18:52:56 +0000 UTC" firstStartedPulling="2026-01-28 18:52:58.045907547 +0000 UTC m=+1384.010090421" lastFinishedPulling="2026-01-28 18:53:03.479456313 +0000 UTC m=+1389.443639187" observedRunningTime="2026-01-28 18:53:05.316926468 +0000 UTC m=+1391.281109352" watchObservedRunningTime="2026-01-28 18:53:05.320342175 +0000 UTC m=+1391.284525049"
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.347089 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.367098807 podStartE2EDuration="9.347064773s" podCreationTimestamp="2026-01-28 18:52:56 +0000 UTC" firstStartedPulling="2026-01-28 18:52:57.492833788 +0000 UTC m=+1383.457016662" lastFinishedPulling="2026-01-28 18:53:03.472799744 +0000 UTC m=+1389.436982628" observedRunningTime="2026-01-28 18:53:05.342848881 +0000 UTC m=+1391.307031775" watchObservedRunningTime="2026-01-28 18:53:05.347064773 +0000 UTC m=+1391.311247647"
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.437949 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spvxv\" (UniqueName: \"kubernetes.io/projected/062dce82-9296-48da-a29d-2f42ff09e306-kube-api-access-spvxv\") pod \"062dce82-9296-48da-a29d-2f42ff09e306\" (UID: \"062dce82-9296-48da-a29d-2f42ff09e306\") "
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.438307 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/062dce82-9296-48da-a29d-2f42ff09e306-combined-ca-bundle\") pod \"062dce82-9296-48da-a29d-2f42ff09e306\" (UID: \"062dce82-9296-48da-a29d-2f42ff09e306\") "
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.438376 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/062dce82-9296-48da-a29d-2f42ff09e306-config-data\") pod \"062dce82-9296-48da-a29d-2f42ff09e306\" (UID: \"062dce82-9296-48da-a29d-2f42ff09e306\") "
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.445664 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/062dce82-9296-48da-a29d-2f42ff09e306-kube-api-access-spvxv" (OuterVolumeSpecName: "kube-api-access-spvxv") pod "062dce82-9296-48da-a29d-2f42ff09e306" (UID: "062dce82-9296-48da-a29d-2f42ff09e306"). InnerVolumeSpecName "kube-api-access-spvxv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.446676 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/062dce82-9296-48da-a29d-2f42ff09e306-config-data" (OuterVolumeSpecName: "config-data") pod "062dce82-9296-48da-a29d-2f42ff09e306" (UID: "062dce82-9296-48da-a29d-2f42ff09e306"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.447842 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/062dce82-9296-48da-a29d-2f42ff09e306-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "062dce82-9296-48da-a29d-2f42ff09e306" (UID: "062dce82-9296-48da-a29d-2f42ff09e306"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.541903 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/062dce82-9296-48da-a29d-2f42ff09e306-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.542459 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/062dce82-9296-48da-a29d-2f42ff09e306-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:05 crc kubenswrapper[4767]: I0128 18:53:05.542473 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spvxv\" (UniqueName: \"kubernetes.io/projected/062dce82-9296-48da-a29d-2f42ff09e306-kube-api-access-spvxv\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.232969 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.268395 4767 generic.go:334] "Generic (PLEG): container finished" podID="a7ad1b44-d718-494d-8a8e-e09d2c7f306a" containerID="a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615" exitCode=0
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.268432 4767 generic.go:334] "Generic (PLEG): container finished" podID="a7ad1b44-d718-494d-8a8e-e09d2c7f306a" containerID="37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5" exitCode=143
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.268482 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a7ad1b44-d718-494d-8a8e-e09d2c7f306a","Type":"ContainerDied","Data":"a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615"}
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.268591 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.269487 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a7ad1b44-d718-494d-8a8e-e09d2c7f306a","Type":"ContainerDied","Data":"37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5"}
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.269512 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"a7ad1b44-d718-494d-8a8e-e09d2c7f306a","Type":"ContainerDied","Data":"29b8ff59205f77b07424f95a56359d5bea97d257d897fa31a4355fcdf1052f89"}
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.269536 4767 scope.go:117] "RemoveContainer" containerID="a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.271726 4767 generic.go:334] "Generic (PLEG): container finished" podID="5c6f4dc4-6288-497b-b884-985966e6c5df" containerID="8db0668610eaa32477d70599ebb733ed86568f2c5ba6fd4ee24bbb812d918944" exitCode=0
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.271750 4767 generic.go:334] "Generic (PLEG): container finished" podID="5c6f4dc4-6288-497b-b884-985966e6c5df" containerID="4f4cfda14a440771fc4421ab49376af367ae855457f8f61770c279422f387af4" exitCode=143
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.271778 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5c6f4dc4-6288-497b-b884-985966e6c5df","Type":"ContainerDied","Data":"8db0668610eaa32477d70599ebb733ed86568f2c5ba6fd4ee24bbb812d918944"}
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.271847 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5c6f4dc4-6288-497b-b884-985966e6c5df","Type":"ContainerDied","Data":"4f4cfda14a440771fc4421ab49376af367ae855457f8f61770c279422f387af4"}
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.271802 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.304566 4767 scope.go:117] "RemoveContainer" containerID="37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.366570 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-logs\") pod \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") "
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.366729 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-combined-ca-bundle\") pod \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") "
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.366764 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-config-data\") pod \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") "
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.366943 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m6wqc\" (UniqueName: \"kubernetes.io/projected/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-kube-api-access-m6wqc\") pod \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\" (UID: \"a7ad1b44-d718-494d-8a8e-e09d2c7f306a\") "
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.374971 4767 scope.go:117] "RemoveContainer" containerID="a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.376069 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-kube-api-access-m6wqc" (OuterVolumeSpecName: "kube-api-access-m6wqc") pod "a7ad1b44-d718-494d-8a8e-e09d2c7f306a" (UID: "a7ad1b44-d718-494d-8a8e-e09d2c7f306a"). InnerVolumeSpecName "kube-api-access-m6wqc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:53:06 crc kubenswrapper[4767]: E0128 18:53:06.383619 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615\": container with ID starting with a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615 not found: ID does not exist" containerID="a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.383680 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615"} err="failed to get container status \"a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615\": rpc error: code = NotFound desc = could not find container \"a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615\": container with ID starting with a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615 not found: ID does not exist"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.383713 4767 scope.go:117] "RemoveContainer" containerID="37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.384281 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-logs" (OuterVolumeSpecName: "logs") pod "a7ad1b44-d718-494d-8a8e-e09d2c7f306a" (UID: "a7ad1b44-d718-494d-8a8e-e09d2c7f306a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:53:06 crc kubenswrapper[4767]: E0128 18:53:06.395456 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5\": container with ID starting with 37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5 not found: ID does not exist" containerID="37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.395579 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5"} err="failed to get container status \"37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5\": rpc error: code = NotFound desc = could not find container \"37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5\": container with ID starting with 37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5 not found: ID does not exist"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.395623 4767 scope.go:117] "RemoveContainer" containerID="a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.398451 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615"} err="failed to get container status \"a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615\": rpc error: code = NotFound desc = could not find container \"a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615\": container with ID starting with a8552e62bbe26896c97c14e3f16262a9ec30bf50f7d77119210d1ed8babb9615 not found: ID does not exist"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.398516 4767 scope.go:117] "RemoveContainer" containerID="37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.398712 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.399622 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5"} err="failed to get container status \"37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5\": rpc error: code = NotFound desc = could not find container \"37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5\": container with ID starting with 37035389fad39b2d68ed5b99954103a1a1eca05a114b11c5af574b8749afb4b5 not found: ID does not exist"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.420264 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-config-data" (OuterVolumeSpecName: "config-data") pod "a7ad1b44-d718-494d-8a8e-e09d2c7f306a" (UID: "a7ad1b44-d718-494d-8a8e-e09d2c7f306a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.441543 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a7ad1b44-d718-494d-8a8e-e09d2c7f306a" (UID: "a7ad1b44-d718-494d-8a8e-e09d2c7f306a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.446160 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.473594 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-logs\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.473648 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.473664 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.473684 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m6wqc\" (UniqueName: \"kubernetes.io/projected/a7ad1b44-d718-494d-8a8e-e09d2c7f306a-kube-api-access-m6wqc\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.482249 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:53:06 crc kubenswrapper[4767]: E0128 18:53:06.483711 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7ad1b44-d718-494d-8a8e-e09d2c7f306a" containerName="nova-api-api"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.483743 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7ad1b44-d718-494d-8a8e-e09d2c7f306a" containerName="nova-api-api"
Jan 28 18:53:06 crc kubenswrapper[4767]: E0128 18:53:06.483782 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a7ad1b44-d718-494d-8a8e-e09d2c7f306a" containerName="nova-api-log"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.483798 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a7ad1b44-d718-494d-8a8e-e09d2c7f306a" containerName="nova-api-log"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.484363 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7ad1b44-d718-494d-8a8e-e09d2c7f306a" containerName="nova-api-api"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.484403 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a7ad1b44-d718-494d-8a8e-e09d2c7f306a" containerName="nova-api-log"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.485828 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.502807 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.532690 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.578072 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2799053-dd71-462c-bb14-ccdee947780e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f2799053-dd71-462c-bb14-ccdee947780e\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.584127 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cchtx\" (UniqueName: \"kubernetes.io/projected/f2799053-dd71-462c-bb14-ccdee947780e-kube-api-access-cchtx\") pod \"nova-cell0-conductor-0\" (UID: \"f2799053-dd71-462c-bb14-ccdee947780e\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.584324 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2799053-dd71-462c-bb14-ccdee947780e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f2799053-dd71-462c-bb14-ccdee947780e\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.632228 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.643854 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.667813 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.671061 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.680627 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.683526 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.687396 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cchtx\" (UniqueName: \"kubernetes.io/projected/f2799053-dd71-462c-bb14-ccdee947780e-kube-api-access-cchtx\") pod \"nova-cell0-conductor-0\" (UID: \"f2799053-dd71-462c-bb14-ccdee947780e\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.687788 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2799053-dd71-462c-bb14-ccdee947780e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f2799053-dd71-462c-bb14-ccdee947780e\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.687945 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2799053-dd71-462c-bb14-ccdee947780e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f2799053-dd71-462c-bb14-ccdee947780e\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.712691 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f2799053-dd71-462c-bb14-ccdee947780e-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"f2799053-dd71-462c-bb14-ccdee947780e\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.721560 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f2799053-dd71-462c-bb14-ccdee947780e-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"f2799053-dd71-462c-bb14-ccdee947780e\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.724506 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cchtx\" (UniqueName: \"kubernetes.io/projected/f2799053-dd71-462c-bb14-ccdee947780e-kube-api-access-cchtx\") pod \"nova-cell0-conductor-0\" (UID: \"f2799053-dd71-462c-bb14-ccdee947780e\") " pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.735940 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.790591 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42kmw\" (UniqueName: \"kubernetes.io/projected/d9cee437-7c79-450e-906c-6f0d1c26a12c-kube-api-access-42kmw\") pod \"nova-api-0\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.790676 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9cee437-7c79-450e-906c-6f0d1c26a12c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.791227 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9cee437-7c79-450e-906c-6f0d1c26a12c-config-data\") pod \"nova-api-0\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.791391 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9cee437-7c79-450e-906c-6f0d1c26a12c-logs\") pod \"nova-api-0\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.812356 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="062dce82-9296-48da-a29d-2f42ff09e306" path="/var/lib/kubelet/pods/062dce82-9296-48da-a29d-2f42ff09e306/volumes"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.812933 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a7ad1b44-d718-494d-8a8e-e09d2c7f306a" path="/var/lib/kubelet/pods/a7ad1b44-d718-494d-8a8e-e09d2c7f306a/volumes"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.844954 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.889667 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.893851 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9cee437-7c79-450e-906c-6f0d1c26a12c-logs\") pod \"nova-api-0\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.895082 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42kmw\" (UniqueName: \"kubernetes.io/projected/d9cee437-7c79-450e-906c-6f0d1c26a12c-kube-api-access-42kmw\") pod \"nova-api-0\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.895224 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9cee437-7c79-450e-906c-6f0d1c26a12c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.895567 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9cee437-7c79-450e-906c-6f0d1c26a12c-config-data\") pod \"nova-api-0\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.897527 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9cee437-7c79-450e-906c-6f0d1c26a12c-logs\") pod \"nova-api-0\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.901925 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9cee437-7c79-450e-906c-6f0d1c26a12c-config-data\") pod \"nova-api-0\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.907999 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9cee437-7c79-450e-906c-6f0d1c26a12c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.920868 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-42kmw\" (UniqueName: \"kubernetes.io/projected/d9cee437-7c79-450e-906c-6f0d1c26a12c-kube-api-access-42kmw\") pod \"nova-api-0\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " pod="openstack/nova-api-0"
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.997177 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c6f4dc4-6288-497b-b884-985966e6c5df-combined-ca-bundle\") pod \"5c6f4dc4-6288-497b-b884-985966e6c5df\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") "
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.997302 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c6f4dc4-6288-497b-b884-985966e6c5df-config-data\") pod \"5c6f4dc4-6288-497b-b884-985966e6c5df\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") "
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.997340 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c6f4dc4-6288-497b-b884-985966e6c5df-logs\") pod \"5c6f4dc4-6288-497b-b884-985966e6c5df\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") "
Jan 28 18:53:06 crc kubenswrapper[4767]: I0128 18:53:06.997383 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x64tx\" (UniqueName: \"kubernetes.io/projected/5c6f4dc4-6288-497b-b884-985966e6c5df-kube-api-access-x64tx\") pod \"5c6f4dc4-6288-497b-b884-985966e6c5df\" (UID: \"5c6f4dc4-6288-497b-b884-985966e6c5df\") "
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.003376 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c6f4dc4-6288-497b-b884-985966e6c5df-logs" (OuterVolumeSpecName: "logs") pod "5c6f4dc4-6288-497b-b884-985966e6c5df" (UID: "5c6f4dc4-6288-497b-b884-985966e6c5df"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.012983 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c6f4dc4-6288-497b-b884-985966e6c5df-kube-api-access-x64tx" (OuterVolumeSpecName: "kube-api-access-x64tx") pod "5c6f4dc4-6288-497b-b884-985966e6c5df" (UID: "5c6f4dc4-6288-497b-b884-985966e6c5df"). InnerVolumeSpecName "kube-api-access-x64tx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.013634 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.078817 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c6f4dc4-6288-497b-b884-985966e6c5df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5c6f4dc4-6288-497b-b884-985966e6c5df" (UID: "5c6f4dc4-6288-497b-b884-985966e6c5df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.083381 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c6f4dc4-6288-497b-b884-985966e6c5df-config-data" (OuterVolumeSpecName: "config-data") pod "5c6f4dc4-6288-497b-b884-985966e6c5df" (UID: "5c6f4dc4-6288-497b-b884-985966e6c5df"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.101082 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5c6f4dc4-6288-497b-b884-985966e6c5df-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.101292 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5c6f4dc4-6288-497b-b884-985966e6c5df-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.101314 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5c6f4dc4-6288-497b-b884-985966e6c5df-logs\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.101331 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x64tx\" (UniqueName: \"kubernetes.io/projected/5c6f4dc4-6288-497b-b884-985966e6c5df-kube-api-access-x64tx\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.104011 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.145602 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.224887 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-d9w94"]
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.225247 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" podUID="530c6c66-df17-4d54-b254-b0bf9e860545" containerName="dnsmasq-dns" containerID="cri-o://86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528" gracePeriod=10
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.311314 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"5c6f4dc4-6288-497b-b884-985966e6c5df","Type":"ContainerDied","Data":"e1b4b52b03a8c0fe2bbcc20a165500a396723844f81f4c39c871ada18d759a88"}
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.311414 4767 scope.go:117] "RemoveContainer" containerID="8db0668610eaa32477d70599ebb733ed86568f2c5ba6fd4ee24bbb812d918944"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.311589 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.383490 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.431854 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.435292 4767 scope.go:117] "RemoveContainer" containerID="4f4cfda14a440771fc4421ab49376af367ae855457f8f61770c279422f387af4"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.455958 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 18:53:07 crc kubenswrapper[4767]: E0128 18:53:07.456729 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c6f4dc4-6288-497b-b884-985966e6c5df" containerName="nova-metadata-metadata"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.456758 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c6f4dc4-6288-497b-b884-985966e6c5df" containerName="nova-metadata-metadata"
Jan 28 18:53:07 crc kubenswrapper[4767]: E0128 18:53:07.456792 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c6f4dc4-6288-497b-b884-985966e6c5df" containerName="nova-metadata-log"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.456800 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c6f4dc4-6288-497b-b884-985966e6c5df" containerName="nova-metadata-log"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.457137 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c6f4dc4-6288-497b-b884-985966e6c5df" containerName="nova-metadata-metadata"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.457159 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c6f4dc4-6288-497b-b884-985966e6c5df" containerName="nova-metadata-log"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.459315 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.464310 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.464682 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.468933 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.530419 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9740068-f09c-4f68-9d36-166f7c26d7d9-logs\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.530490 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.530604 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vzr8\" (UniqueName: \"kubernetes.io/projected/a9740068-f09c-4f68-9d36-166f7c26d7d9-kube-api-access-9vzr8\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.530693 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-config-data\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.530743 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.591074 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.632695 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-config-data\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.632774 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.632840 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9740068-f09c-4f68-9d36-166f7c26d7d9-logs\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.632873 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.632933 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vzr8\" (UniqueName: \"kubernetes.io/projected/a9740068-f09c-4f68-9d36-166f7c26d7d9-kube-api-access-9vzr8\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.637475 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9740068-f09c-4f68-9d36-166f7c26d7d9-logs\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.643998 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.645700 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.653065 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-config-data\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.656990 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vzr8\" (UniqueName: \"kubernetes.io/projected/a9740068-f09c-4f68-9d36-166f7c26d7d9-kube-api-access-9vzr8\") pod \"nova-metadata-0\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.812660 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 18:53:07 crc kubenswrapper[4767]: I0128 18:53:07.849641 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 28 18:53:07 crc kubenswrapper[4767]: W0128 18:53:07.868848 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9cee437_7c79_450e_906c_6f0d1c26a12c.slice/crio-f5d052bffe1b1095ec0e30da6e427e8f44774f1b8fcde01e6536de965b0ba809 WatchSource:0}: Error finding container f5d052bffe1b1095ec0e30da6e427e8f44774f1b8fcde01e6536de965b0ba809: Status 404 returned error can't find the container with id f5d052bffe1b1095ec0e30da6e427e8f44774f1b8fcde01e6536de965b0ba809
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.035731 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94"
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.146162 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-config\") pod \"530c6c66-df17-4d54-b254-b0bf9e860545\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") "
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.149660 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-ovsdbserver-sb\") pod \"530c6c66-df17-4d54-b254-b0bf9e860545\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") "
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.149723 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-dns-svc\") pod \"530c6c66-df17-4d54-b254-b0bf9e860545\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") "
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.149871 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-ovsdbserver-nb\") pod \"530c6c66-df17-4d54-b254-b0bf9e860545\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") "
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.149951 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-dns-swift-storage-0\") pod \"530c6c66-df17-4d54-b254-b0bf9e860545\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") "
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.150082 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6rlnk\" (UniqueName: \"kubernetes.io/projected/530c6c66-df17-4d54-b254-b0bf9e860545-kube-api-access-6rlnk\") pod \"530c6c66-df17-4d54-b254-b0bf9e860545\" (UID: \"530c6c66-df17-4d54-b254-b0bf9e860545\") "
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.174894 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/530c6c66-df17-4d54-b254-b0bf9e860545-kube-api-access-6rlnk" (OuterVolumeSpecName: "kube-api-access-6rlnk") pod "530c6c66-df17-4d54-b254-b0bf9e860545" (UID: "530c6c66-df17-4d54-b254-b0bf9e860545"). InnerVolumeSpecName "kube-api-access-6rlnk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.240515 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-config" (OuterVolumeSpecName: "config") pod "530c6c66-df17-4d54-b254-b0bf9e860545" (UID: "530c6c66-df17-4d54-b254-b0bf9e860545"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.262692 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6rlnk\" (UniqueName: \"kubernetes.io/projected/530c6c66-df17-4d54-b254-b0bf9e860545-kube-api-access-6rlnk\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.262732 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-config\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.272653 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "530c6c66-df17-4d54-b254-b0bf9e860545" (UID: "530c6c66-df17-4d54-b254-b0bf9e860545"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.276088 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "530c6c66-df17-4d54-b254-b0bf9e860545" (UID: "530c6c66-df17-4d54-b254-b0bf9e860545"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.294366 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "530c6c66-df17-4d54-b254-b0bf9e860545" (UID: "530c6c66-df17-4d54-b254-b0bf9e860545"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.373493 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d9cee437-7c79-450e-906c-6f0d1c26a12c","Type":"ContainerStarted","Data":"35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c"}
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.373569 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d9cee437-7c79-450e-906c-6f0d1c26a12c","Type":"ContainerStarted","Data":"f5d052bffe1b1095ec0e30da6e427e8f44774f1b8fcde01e6536de965b0ba809"}
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.374079 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.374138 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.374158 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.418908 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f2799053-dd71-462c-bb14-ccdee947780e","Type":"ContainerStarted","Data":"4c9c228254f779b9fa0fc9e2d85b2a38e7edc91eb09309825cd08feb80d54060"}
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.418982 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"f2799053-dd71-462c-bb14-ccdee947780e","Type":"ContainerStarted","Data":"2b4620aa87793c9169b6812337bd1f6ea990eac171579a48d2f5503fb0a71400"}
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.427526 4767 generic.go:334] "Generic (PLEG): container finished" podID="530c6c66-df17-4d54-b254-b0bf9e860545" containerID="86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528" exitCode=0
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.427621 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" event={"ID":"530c6c66-df17-4d54-b254-b0bf9e860545","Type":"ContainerDied","Data":"86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528"}
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.427665 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94" event={"ID":"530c6c66-df17-4d54-b254-b0bf9e860545","Type":"ContainerDied","Data":"72a62254ee09764951b982197eeeddfbb8bd1dcd62b140a3c6a7f4c1d09ca886"}
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.427693 4767 scope.go:117] "RemoveContainer" containerID="86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528"
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.428037 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-688b9f5b49-d9w94"
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.456297 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "530c6c66-df17-4d54-b254-b0bf9e860545" (UID: "530c6c66-df17-4d54-b254-b0bf9e860545"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.466419 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.477261 4767 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/530c6c66-df17-4d54-b254-b0bf9e860545-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.653841 4767 scope.go:117] "RemoveContainer" containerID="239c3b23b873dc64c0959eb0ad4990ff3fed56c07eaf7b37778cd118361de5e0"
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.750469 4767 scope.go:117] "RemoveContainer" containerID="86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528"
Jan 28 18:53:08 crc kubenswrapper[4767]: E0128 18:53:08.752652 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528\": container with ID starting with 86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528 not found: ID does not exist" containerID="86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528"
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.752695 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528"} err="failed to get container status \"86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528\": rpc error: code = NotFound desc = could not find container \"86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528\": container with ID starting with 86affe8453a833c0f0c18c3aa379aa49b3a1a656c0feaa199c10e7e247259528 not found: ID does not exist"
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.752729 4767 scope.go:117] "RemoveContainer" containerID="239c3b23b873dc64c0959eb0ad4990ff3fed56c07eaf7b37778cd118361de5e0"
Jan 28 18:53:08 crc kubenswrapper[4767]: E0128 18:53:08.757888 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"239c3b23b873dc64c0959eb0ad4990ff3fed56c07eaf7b37778cd118361de5e0\": container with ID starting with 239c3b23b873dc64c0959eb0ad4990ff3fed56c07eaf7b37778cd118361de5e0 not found: ID does not exist" containerID="239c3b23b873dc64c0959eb0ad4990ff3fed56c07eaf7b37778cd118361de5e0"
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.757935 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"239c3b23b873dc64c0959eb0ad4990ff3fed56c07eaf7b37778cd118361de5e0"} err="failed to get container status \"239c3b23b873dc64c0959eb0ad4990ff3fed56c07eaf7b37778cd118361de5e0\": rpc error: code = NotFound desc = could not find container \"239c3b23b873dc64c0959eb0ad4990ff3fed56c07eaf7b37778cd118361de5e0\": container with ID starting with 239c3b23b873dc64c0959eb0ad4990ff3fed56c07eaf7b37778cd118361de5e0 not found: ID does not exist"
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.818499 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c6f4dc4-6288-497b-b884-985966e6c5df" path="/var/lib/kubelet/pods/5c6f4dc4-6288-497b-b884-985966e6c5df/volumes"
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.838039 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-d9w94"]
Jan 28 18:53:08 crc kubenswrapper[4767]: I0128 18:53:08.848426 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-688b9f5b49-d9w94"]
Jan 28 18:53:09 crc kubenswrapper[4767]: I0128 18:53:09.458623 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a9740068-f09c-4f68-9d36-166f7c26d7d9","Type":"ContainerStarted","Data":"def89e0d113dade49e4f52449e67593035204d27935ef6ff62f56b9bee88742c"}
Jan 28 18:53:09 crc kubenswrapper[4767]: I0128 18:53:09.459948 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a9740068-f09c-4f68-9d36-166f7c26d7d9","Type":"ContainerStarted","Data":"eab56e797d8533e0d6e6de804197ab98cd955fcc0047e3154a73d58afc0d288b"}
Jan 28 18:53:09 crc kubenswrapper[4767]: I0128 18:53:09.465218 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d9cee437-7c79-450e-906c-6f0d1c26a12c","Type":"ContainerStarted","Data":"72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c"}
Jan 28 18:53:09 crc kubenswrapper[4767]: I0128 18:53:09.465462 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Jan 28 18:53:09 crc kubenswrapper[4767]: I0128 18:53:09.487299 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=3.487275255 podStartE2EDuration="3.487275255s" podCreationTimestamp="2026-01-28 18:53:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:09.486122839 +0000 UTC m=+1395.450305713" watchObservedRunningTime="2026-01-28 18:53:09.487275255 +0000 UTC m=+1395.451458129"
Jan 28 18:53:09 crc kubenswrapper[4767]: I0128 18:53:09.546648 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.546619058 podStartE2EDuration="3.546619058s" podCreationTimestamp="2026-01-28 18:53:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:09.514006874 +0000 UTC m=+1395.478189748" watchObservedRunningTime="2026-01-28 18:53:09.546619058 +0000 UTC m=+1395.510801932"
Jan 28 18:53:10 crc kubenswrapper[4767]: I0128 18:53:10.480978 4767 generic.go:334] "Generic (PLEG): container finished" podID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerID="4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95" exitCode=0
Jan 28 18:53:10 crc kubenswrapper[4767]: I0128 18:53:10.481095 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8","Type":"ContainerDied","Data":"4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95"}
Jan 28 18:53:10 crc kubenswrapper[4767]: I0128 18:53:10.486649 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod"
pod="openstack/nova-metadata-0" event={"ID":"a9740068-f09c-4f68-9d36-166f7c26d7d9","Type":"ContainerStarted","Data":"b0478cdbf5e6639baeeeb8efa50d2a3e073e40ffcfbe618e6ae2b3f9f28aa543"} Jan 28 18:53:10 crc kubenswrapper[4767]: I0128 18:53:10.811020 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="530c6c66-df17-4d54-b254-b0bf9e860545" path="/var/lib/kubelet/pods/530c6c66-df17-4d54-b254-b0bf9e860545/volumes" Jan 28 18:53:11 crc kubenswrapper[4767]: I0128 18:53:11.525667 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.525641834 podStartE2EDuration="4.525641834s" podCreationTimestamp="2026-01-28 18:53:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:11.523370083 +0000 UTC m=+1397.487552957" watchObservedRunningTime="2026-01-28 18:53:11.525641834 +0000 UTC m=+1397.489824698" Jan 28 18:53:12 crc kubenswrapper[4767]: I0128 18:53:12.511610 4767 generic.go:334] "Generic (PLEG): container finished" podID="f9f9d061-f0ef-4bb6-8b3c-82838fedc33f" containerID="b8eea234273b2b3910c136fb5ad82f22bb028b9f1b75f8ec921682db0abe37ed" exitCode=0 Jan 28 18:53:12 crc kubenswrapper[4767]: I0128 18:53:12.511718 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-8x72k" event={"ID":"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f","Type":"ContainerDied","Data":"b8eea234273b2b3910c136fb5ad82f22bb028b9f1b75f8ec921682db0abe37ed"} Jan 28 18:53:12 crc kubenswrapper[4767]: I0128 18:53:12.813617 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 28 18:53:12 crc kubenswrapper[4767]: I0128 18:53:12.813684 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 28 18:53:13 crc kubenswrapper[4767]: I0128 18:53:13.977399 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.130367 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-scripts\") pod \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.130599 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qh9qf\" (UniqueName: \"kubernetes.io/projected/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-kube-api-access-qh9qf\") pod \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.130662 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-config-data\") pod \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.131669 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-combined-ca-bundle\") pod \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\" (UID: \"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f\") " Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.137890 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-scripts" (OuterVolumeSpecName: "scripts") pod "f9f9d061-f0ef-4bb6-8b3c-82838fedc33f" (UID: "f9f9d061-f0ef-4bb6-8b3c-82838fedc33f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.138156 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-kube-api-access-qh9qf" (OuterVolumeSpecName: "kube-api-access-qh9qf") pod "f9f9d061-f0ef-4bb6-8b3c-82838fedc33f" (UID: "f9f9d061-f0ef-4bb6-8b3c-82838fedc33f"). InnerVolumeSpecName "kube-api-access-qh9qf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.162268 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-config-data" (OuterVolumeSpecName: "config-data") pod "f9f9d061-f0ef-4bb6-8b3c-82838fedc33f" (UID: "f9f9d061-f0ef-4bb6-8b3c-82838fedc33f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.163491 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f9f9d061-f0ef-4bb6-8b3c-82838fedc33f" (UID: "f9f9d061-f0ef-4bb6-8b3c-82838fedc33f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.234517 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qh9qf\" (UniqueName: \"kubernetes.io/projected/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-kube-api-access-qh9qf\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.234579 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.234594 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.234605 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.533486 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-8x72k" event={"ID":"f9f9d061-f0ef-4bb6-8b3c-82838fedc33f","Type":"ContainerDied","Data":"2170ef28c9162a00fa6c6e4552be38e4f50b310bd984c20407c5fb9469d4f7d6"} Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.533974 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2170ef28c9162a00fa6c6e4552be38e4f50b310bd984c20407c5fb9469d4f7d6" Jan 28 18:53:14 crc kubenswrapper[4767]: I0128 18:53:14.533640 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-8x72k" Jan 28 18:53:15 crc kubenswrapper[4767]: I0128 18:53:15.454958 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:53:15 crc kubenswrapper[4767]: I0128 18:53:15.455020 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:53:16 crc kubenswrapper[4767]: I0128 18:53:16.925255 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Jan 28 18:53:17 crc kubenswrapper[4767]: I0128 18:53:17.146854 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 18:53:17 crc kubenswrapper[4767]: I0128 18:53:17.146914 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 18:53:17 crc kubenswrapper[4767]: I0128 18:53:17.408365 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:17 crc kubenswrapper[4767]: I0128 18:53:17.463902 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:53:17 crc kubenswrapper[4767]: I0128 18:53:17.464288 4767 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/nova-metadata-0" podUID="a9740068-f09c-4f68-9d36-166f7c26d7d9" containerName="nova-metadata-log" containerID="cri-o://def89e0d113dade49e4f52449e67593035204d27935ef6ff62f56b9bee88742c" gracePeriod=30 Jan 28 18:53:17 crc kubenswrapper[4767]: I0128 18:53:17.464433 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a9740068-f09c-4f68-9d36-166f7c26d7d9" containerName="nova-metadata-metadata" containerID="cri-o://b0478cdbf5e6639baeeeb8efa50d2a3e073e40ffcfbe618e6ae2b3f9f28aa543" gracePeriod=30 Jan 28 18:53:17 crc kubenswrapper[4767]: I0128 18:53:17.587120 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d9cee437-7c79-450e-906c-6f0d1c26a12c" containerName="nova-api-log" containerID="cri-o://35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c" gracePeriod=30 Jan 28 18:53:17 crc kubenswrapper[4767]: I0128 18:53:17.587335 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d9cee437-7c79-450e-906c-6f0d1c26a12c" containerName="nova-api-api" containerID="cri-o://72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c" gracePeriod=30 Jan 28 18:53:17 crc kubenswrapper[4767]: I0128 18:53:17.714092 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d9cee437-7c79-450e-906c-6f0d1c26a12c" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.202:8774/\": EOF" Jan 28 18:53:17 crc kubenswrapper[4767]: I0128 18:53:17.717881 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d9cee437-7c79-450e-906c-6f0d1c26a12c" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.202:8774/\": EOF" Jan 28 18:53:18 crc kubenswrapper[4767]: I0128 18:53:18.598651 4767 generic.go:334] "Generic (PLEG): container finished" podID="a9740068-f09c-4f68-9d36-166f7c26d7d9" containerID="b0478cdbf5e6639baeeeb8efa50d2a3e073e40ffcfbe618e6ae2b3f9f28aa543" exitCode=0 Jan 28 18:53:18 crc kubenswrapper[4767]: I0128 18:53:18.598699 4767 generic.go:334] "Generic (PLEG): container finished" podID="a9740068-f09c-4f68-9d36-166f7c26d7d9" containerID="def89e0d113dade49e4f52449e67593035204d27935ef6ff62f56b9bee88742c" exitCode=143 Jan 28 18:53:18 crc kubenswrapper[4767]: I0128 18:53:18.598745 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a9740068-f09c-4f68-9d36-166f7c26d7d9","Type":"ContainerDied","Data":"b0478cdbf5e6639baeeeb8efa50d2a3e073e40ffcfbe618e6ae2b3f9f28aa543"} Jan 28 18:53:18 crc kubenswrapper[4767]: I0128 18:53:18.598809 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a9740068-f09c-4f68-9d36-166f7c26d7d9","Type":"ContainerDied","Data":"def89e0d113dade49e4f52449e67593035204d27935ef6ff62f56b9bee88742c"} Jan 28 18:53:18 crc kubenswrapper[4767]: I0128 18:53:18.603973 4767 generic.go:334] "Generic (PLEG): container finished" podID="d9cee437-7c79-450e-906c-6f0d1c26a12c" containerID="35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c" exitCode=143 Jan 28 18:53:18 crc kubenswrapper[4767]: I0128 18:53:18.604027 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d9cee437-7c79-450e-906c-6f0d1c26a12c","Type":"ContainerDied","Data":"35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c"} Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 
18:53:19.122155 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.281089 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-nova-metadata-tls-certs\") pod \"a9740068-f09c-4f68-9d36-166f7c26d7d9\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.281348 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vzr8\" (UniqueName: \"kubernetes.io/projected/a9740068-f09c-4f68-9d36-166f7c26d7d9-kube-api-access-9vzr8\") pod \"a9740068-f09c-4f68-9d36-166f7c26d7d9\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.281435 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-combined-ca-bundle\") pod \"a9740068-f09c-4f68-9d36-166f7c26d7d9\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.281528 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9740068-f09c-4f68-9d36-166f7c26d7d9-logs\") pod \"a9740068-f09c-4f68-9d36-166f7c26d7d9\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.281627 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-config-data\") pod \"a9740068-f09c-4f68-9d36-166f7c26d7d9\" (UID: \"a9740068-f09c-4f68-9d36-166f7c26d7d9\") " Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.282257 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9740068-f09c-4f68-9d36-166f7c26d7d9-logs" (OuterVolumeSpecName: "logs") pod "a9740068-f09c-4f68-9d36-166f7c26d7d9" (UID: "a9740068-f09c-4f68-9d36-166f7c26d7d9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.282822 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a9740068-f09c-4f68-9d36-166f7c26d7d9-logs\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.295248 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9740068-f09c-4f68-9d36-166f7c26d7d9-kube-api-access-9vzr8" (OuterVolumeSpecName: "kube-api-access-9vzr8") pod "a9740068-f09c-4f68-9d36-166f7c26d7d9" (UID: "a9740068-f09c-4f68-9d36-166f7c26d7d9"). InnerVolumeSpecName "kube-api-access-9vzr8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.317062 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-config-data" (OuterVolumeSpecName: "config-data") pod "a9740068-f09c-4f68-9d36-166f7c26d7d9" (UID: "a9740068-f09c-4f68-9d36-166f7c26d7d9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.319342 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a9740068-f09c-4f68-9d36-166f7c26d7d9" (UID: "a9740068-f09c-4f68-9d36-166f7c26d7d9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.344960 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "a9740068-f09c-4f68-9d36-166f7c26d7d9" (UID: "a9740068-f09c-4f68-9d36-166f7c26d7d9"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.389252 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vzr8\" (UniqueName: \"kubernetes.io/projected/a9740068-f09c-4f68-9d36-166f7c26d7d9-kube-api-access-9vzr8\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.389653 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.389780 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.389887 4767 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a9740068-f09c-4f68-9d36-166f7c26d7d9-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.629927 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a9740068-f09c-4f68-9d36-166f7c26d7d9","Type":"ContainerDied","Data":"eab56e797d8533e0d6e6de804197ab98cd955fcc0047e3154a73d58afc0d288b"} Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.629988 4767 scope.go:117] "RemoveContainer" containerID="b0478cdbf5e6639baeeeb8efa50d2a3e073e40ffcfbe618e6ae2b3f9f28aa543" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.630069 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.670655 4767 scope.go:117] "RemoveContainer" containerID="def89e0d113dade49e4f52449e67593035204d27935ef6ff62f56b9bee88742c" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.677101 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.695905 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.719511 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:53:19 crc kubenswrapper[4767]: E0128 18:53:19.720056 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="530c6c66-df17-4d54-b254-b0bf9e860545" containerName="dnsmasq-dns" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.720076 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="530c6c66-df17-4d54-b254-b0bf9e860545" containerName="dnsmasq-dns" Jan 28 18:53:19 crc kubenswrapper[4767]: E0128 18:53:19.720099 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9f9d061-f0ef-4bb6-8b3c-82838fedc33f" containerName="nova-manage" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.720107 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9f9d061-f0ef-4bb6-8b3c-82838fedc33f" containerName="nova-manage" Jan 28 18:53:19 crc kubenswrapper[4767]: E0128 18:53:19.720131 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="530c6c66-df17-4d54-b254-b0bf9e860545" containerName="init" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.720141 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="530c6c66-df17-4d54-b254-b0bf9e860545" containerName="init" Jan 28 18:53:19 crc kubenswrapper[4767]: E0128 18:53:19.720157 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9740068-f09c-4f68-9d36-166f7c26d7d9" containerName="nova-metadata-metadata" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.720165 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9740068-f09c-4f68-9d36-166f7c26d7d9" containerName="nova-metadata-metadata" Jan 28 18:53:19 crc kubenswrapper[4767]: E0128 18:53:19.720185 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9740068-f09c-4f68-9d36-166f7c26d7d9" containerName="nova-metadata-log" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.720192 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9740068-f09c-4f68-9d36-166f7c26d7d9" containerName="nova-metadata-log" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.723164 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9740068-f09c-4f68-9d36-166f7c26d7d9" containerName="nova-metadata-log" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.723231 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9f9d061-f0ef-4bb6-8b3c-82838fedc33f" containerName="nova-manage" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.723244 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="530c6c66-df17-4d54-b254-b0bf9e860545" containerName="dnsmasq-dns" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.723257 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9740068-f09c-4f68-9d36-166f7c26d7d9" containerName="nova-metadata-metadata" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.724562 4767 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.730507 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.730881 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.748517 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.798722 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.798812 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.798839 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cssfq\" (UniqueName: \"kubernetes.io/projected/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-kube-api-access-cssfq\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.798916 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-config-data\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.798963 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-logs\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.901334 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.901431 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.901483 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cssfq\" (UniqueName: \"kubernetes.io/projected/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-kube-api-access-cssfq\") pod \"nova-metadata-0\" (UID: 
\"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.901611 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-config-data\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.901704 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-logs\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.902583 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-logs\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.909521 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.909899 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.911503 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-config-data\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:19 crc kubenswrapper[4767]: I0128 18:53:19.927802 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cssfq\" (UniqueName: \"kubernetes.io/projected/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-kube-api-access-cssfq\") pod \"nova-metadata-0\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") " pod="openstack/nova-metadata-0" Jan 28 18:53:20 crc kubenswrapper[4767]: I0128 18:53:20.068710 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 18:53:20 crc kubenswrapper[4767]: W0128 18:53:20.673163 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2c61d2b3_8e23_458b_ab4c_2e0097f3496e.slice/crio-baa5ea9bfcb72209c00e6e405701ced137d67648256d7621c7485060c19f3021 WatchSource:0}: Error finding container baa5ea9bfcb72209c00e6e405701ced137d67648256d7621c7485060c19f3021: Status 404 returned error can't find the container with id baa5ea9bfcb72209c00e6e405701ced137d67648256d7621c7485060c19f3021 Jan 28 18:53:20 crc kubenswrapper[4767]: I0128 18:53:20.678804 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:53:20 crc kubenswrapper[4767]: I0128 18:53:20.809096 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9740068-f09c-4f68-9d36-166f7c26d7d9" path="/var/lib/kubelet/pods/a9740068-f09c-4f68-9d36-166f7c26d7d9/volumes" Jan 28 18:53:21 crc kubenswrapper[4767]: I0128 18:53:21.674093 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c61d2b3-8e23-458b-ab4c-2e0097f3496e","Type":"ContainerStarted","Data":"baa5ea9bfcb72209c00e6e405701ced137d67648256d7621c7485060c19f3021"} Jan 28 18:53:22 crc kubenswrapper[4767]: I0128 18:53:22.686153 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c61d2b3-8e23-458b-ab4c-2e0097f3496e","Type":"ContainerStarted","Data":"24a4e4a7af49a58a09c180060abf310a5c5b4fd2c88e758b86e82d4128987380"} Jan 28 18:53:23 crc kubenswrapper[4767]: I0128 18:53:23.709725 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c61d2b3-8e23-458b-ab4c-2e0097f3496e","Type":"ContainerStarted","Data":"cce7ef92922cb7e00c3f9bafec2029d583353b8ad4914634ffaa1060b755870a"} Jan 28 18:53:23 crc kubenswrapper[4767]: I0128 18:53:23.742313 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=4.742293095 podStartE2EDuration="4.742293095s" podCreationTimestamp="2026-01-28 18:53:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:23.738823747 +0000 UTC m=+1409.703006631" watchObservedRunningTime="2026-01-28 18:53:23.742293095 +0000 UTC m=+1409.706475969" Jan 28 18:53:24 crc kubenswrapper[4767]: I0128 18:53:24.156935 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 28 18:53:25 crc kubenswrapper[4767]: I0128 18:53:25.069679 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 28 18:53:25 crc kubenswrapper[4767]: I0128 18:53:25.069746 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.715568 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.777015 4767 generic.go:334] "Generic (PLEG): container finished" podID="d9cee437-7c79-450e-906c-6f0d1c26a12c" containerID="72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c" exitCode=0 Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.777118 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d9cee437-7c79-450e-906c-6f0d1c26a12c","Type":"ContainerDied","Data":"72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c"} Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.777158 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d9cee437-7c79-450e-906c-6f0d1c26a12c","Type":"ContainerDied","Data":"f5d052bffe1b1095ec0e30da6e427e8f44774f1b8fcde01e6536de965b0ba809"} Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.777493 4767 scope.go:117] "RemoveContainer" containerID="72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.777809 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.812904 4767 scope.go:117] "RemoveContainer" containerID="35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.832908 4767 scope.go:117] "RemoveContainer" containerID="72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c" Jan 28 18:53:26 crc kubenswrapper[4767]: E0128 18:53:26.833520 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c\": container with ID starting with 72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c not found: ID does not exist" containerID="72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.833576 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c"} err="failed to get container status \"72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c\": rpc error: code = NotFound desc = could not find container \"72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c\": container with ID starting with 72e056c06bfb441bafbc7b76742f636ffb2b9b51fea997546fd146bd2d4e932c not found: ID does not exist" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.833611 4767 scope.go:117] "RemoveContainer" containerID="35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c" Jan 28 18:53:26 crc kubenswrapper[4767]: E0128 18:53:26.834126 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c\": container with ID starting with 35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c not found: ID does not exist" containerID="35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.834170 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c"} err="failed to 
get container status \"35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c\": rpc error: code = NotFound desc = could not find container \"35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c\": container with ID starting with 35baa4bb8c123b0d1b2221c56ab2ac45f1027e49919d2d24e6ed15374ca59e1c not found: ID does not exist" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.862039 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9cee437-7c79-450e-906c-6f0d1c26a12c-combined-ca-bundle\") pod \"d9cee437-7c79-450e-906c-6f0d1c26a12c\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.862457 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9cee437-7c79-450e-906c-6f0d1c26a12c-config-data\") pod \"d9cee437-7c79-450e-906c-6f0d1c26a12c\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.862582 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42kmw\" (UniqueName: \"kubernetes.io/projected/d9cee437-7c79-450e-906c-6f0d1c26a12c-kube-api-access-42kmw\") pod \"d9cee437-7c79-450e-906c-6f0d1c26a12c\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.862641 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9cee437-7c79-450e-906c-6f0d1c26a12c-logs\") pod \"d9cee437-7c79-450e-906c-6f0d1c26a12c\" (UID: \"d9cee437-7c79-450e-906c-6f0d1c26a12c\") " Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.863530 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d9cee437-7c79-450e-906c-6f0d1c26a12c-logs" (OuterVolumeSpecName: "logs") pod "d9cee437-7c79-450e-906c-6f0d1c26a12c" (UID: "d9cee437-7c79-450e-906c-6f0d1c26a12c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.874348 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9cee437-7c79-450e-906c-6f0d1c26a12c-kube-api-access-42kmw" (OuterVolumeSpecName: "kube-api-access-42kmw") pod "d9cee437-7c79-450e-906c-6f0d1c26a12c" (UID: "d9cee437-7c79-450e-906c-6f0d1c26a12c"). InnerVolumeSpecName "kube-api-access-42kmw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.892823 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9cee437-7c79-450e-906c-6f0d1c26a12c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d9cee437-7c79-450e-906c-6f0d1c26a12c" (UID: "d9cee437-7c79-450e-906c-6f0d1c26a12c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.896310 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9cee437-7c79-450e-906c-6f0d1c26a12c-config-data" (OuterVolumeSpecName: "config-data") pod "d9cee437-7c79-450e-906c-6f0d1c26a12c" (UID: "d9cee437-7c79-450e-906c-6f0d1c26a12c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.965065 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42kmw\" (UniqueName: \"kubernetes.io/projected/d9cee437-7c79-450e-906c-6f0d1c26a12c-kube-api-access-42kmw\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.965185 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d9cee437-7c79-450e-906c-6f0d1c26a12c-logs\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.965200 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9cee437-7c79-450e-906c-6f0d1c26a12c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:26 crc kubenswrapper[4767]: I0128 18:53:26.965239 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9cee437-7c79-450e-906c-6f0d1c26a12c-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.115674 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.125674 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.147713 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:27 crc kubenswrapper[4767]: E0128 18:53:27.148409 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9cee437-7c79-450e-906c-6f0d1c26a12c" containerName="nova-api-log" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.148433 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9cee437-7c79-450e-906c-6f0d1c26a12c" containerName="nova-api-log" Jan 28 18:53:27 crc kubenswrapper[4767]: E0128 18:53:27.148445 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9cee437-7c79-450e-906c-6f0d1c26a12c" containerName="nova-api-api" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.148451 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9cee437-7c79-450e-906c-6f0d1c26a12c" containerName="nova-api-api" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.148665 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9cee437-7c79-450e-906c-6f0d1c26a12c" containerName="nova-api-log" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.148699 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9cee437-7c79-450e-906c-6f0d1c26a12c" containerName="nova-api-api" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.149874 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.153191 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.159409 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.272823 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/466ee90a-37b7-4b56-a87e-e1175cc31b3d-logs\") pod \"nova-api-0\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.272904 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/466ee90a-37b7-4b56-a87e-e1175cc31b3d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.273041 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szjgc\" (UniqueName: \"kubernetes.io/projected/466ee90a-37b7-4b56-a87e-e1175cc31b3d-kube-api-access-szjgc\") pod \"nova-api-0\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.273080 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/466ee90a-37b7-4b56-a87e-e1175cc31b3d-config-data\") pod \"nova-api-0\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.375816 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/466ee90a-37b7-4b56-a87e-e1175cc31b3d-logs\") pod \"nova-api-0\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.375900 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/466ee90a-37b7-4b56-a87e-e1175cc31b3d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.375984 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szjgc\" (UniqueName: \"kubernetes.io/projected/466ee90a-37b7-4b56-a87e-e1175cc31b3d-kube-api-access-szjgc\") pod \"nova-api-0\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.376026 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/466ee90a-37b7-4b56-a87e-e1175cc31b3d-config-data\") pod \"nova-api-0\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.376445 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/466ee90a-37b7-4b56-a87e-e1175cc31b3d-logs\") pod \"nova-api-0\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " 
pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.382472 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/466ee90a-37b7-4b56-a87e-e1175cc31b3d-config-data\") pod \"nova-api-0\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.392782 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/466ee90a-37b7-4b56-a87e-e1175cc31b3d-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.393535 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szjgc\" (UniqueName: \"kubernetes.io/projected/466ee90a-37b7-4b56-a87e-e1175cc31b3d-kube-api-access-szjgc\") pod \"nova-api-0\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.473593 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 18:53:27 crc kubenswrapper[4767]: I0128 18:53:27.954189 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:27 crc kubenswrapper[4767]: W0128 18:53:27.956141 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod466ee90a_37b7_4b56_a87e_e1175cc31b3d.slice/crio-594bfdb2a221fc4b456cdf10ff4abc7d5c6f789024c02e8334121c99ca2a4242 WatchSource:0}: Error finding container 594bfdb2a221fc4b456cdf10ff4abc7d5c6f789024c02e8334121c99ca2a4242: Status 404 returned error can't find the container with id 594bfdb2a221fc4b456cdf10ff4abc7d5c6f789024c02e8334121c99ca2a4242 Jan 28 18:53:28 crc kubenswrapper[4767]: I0128 18:53:28.812845 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9cee437-7c79-450e-906c-6f0d1c26a12c" path="/var/lib/kubelet/pods/d9cee437-7c79-450e-906c-6f0d1c26a12c/volumes" Jan 28 18:53:28 crc kubenswrapper[4767]: I0128 18:53:28.814424 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"466ee90a-37b7-4b56-a87e-e1175cc31b3d","Type":"ContainerStarted","Data":"db1ea5918cc4617957b9ae7a3998c0913b15c7a014c6b312bee16c30f2231584"} Jan 28 18:53:28 crc kubenswrapper[4767]: I0128 18:53:28.814459 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"466ee90a-37b7-4b56-a87e-e1175cc31b3d","Type":"ContainerStarted","Data":"9f89ddd0611806851b7ac80bd30fc64a7e2dd2eb8a70d8eebd9ef6e8a1c5b89a"} Jan 28 18:53:28 crc kubenswrapper[4767]: I0128 18:53:28.814474 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"466ee90a-37b7-4b56-a87e-e1175cc31b3d","Type":"ContainerStarted","Data":"594bfdb2a221fc4b456cdf10ff4abc7d5c6f789024c02e8334121c99ca2a4242"} Jan 28 18:53:30 crc kubenswrapper[4767]: I0128 18:53:30.069748 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 28 18:53:30 crc kubenswrapper[4767]: I0128 18:53:30.070765 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 28 18:53:31 crc kubenswrapper[4767]: I0128 18:53:31.087622 4767 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/nova-metadata-0" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:53:31 crc kubenswrapper[4767]: I0128 18:53:31.087684 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.671079 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.694963 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=7.694938884 podStartE2EDuration="7.694938884s" podCreationTimestamp="2026-01-28 18:53:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:29.850322882 +0000 UTC m=+1415.814505746" watchObservedRunningTime="2026-01-28 18:53:34.694938884 +0000 UTC m=+1420.659121758" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.763665 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-scripts\") pod \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.763787 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-sg-core-conf-yaml\") pod \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.763814 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-config-data\") pod \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.763964 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-combined-ca-bundle\") pod \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.764086 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m4jvr\" (UniqueName: \"kubernetes.io/projected/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-kube-api-access-m4jvr\") pod \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.764124 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-log-httpd\") pod \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.764160 4767 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-run-httpd\") pod \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\" (UID: \"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8\") " Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.764806 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" (UID: "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.765282 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" (UID: "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.765670 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.765699 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.771663 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-scripts" (OuterVolumeSpecName: "scripts") pod "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" (UID: "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.771991 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-kube-api-access-m4jvr" (OuterVolumeSpecName: "kube-api-access-m4jvr") pod "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" (UID: "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8"). InnerVolumeSpecName "kube-api-access-m4jvr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.775253 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.810700 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" (UID: "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.862710 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" (UID: "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.866589 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-56rf2\" (UniqueName: \"kubernetes.io/projected/28e9a69e-421c-41d9-b00d-dcdcea88b828-kube-api-access-56rf2\") pod \"28e9a69e-421c-41d9-b00d-dcdcea88b828\" (UID: \"28e9a69e-421c-41d9-b00d-dcdcea88b828\") " Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.866937 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28e9a69e-421c-41d9-b00d-dcdcea88b828-combined-ca-bundle\") pod \"28e9a69e-421c-41d9-b00d-dcdcea88b828\" (UID: \"28e9a69e-421c-41d9-b00d-dcdcea88b828\") " Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.867096 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28e9a69e-421c-41d9-b00d-dcdcea88b828-config-data\") pod \"28e9a69e-421c-41d9-b00d-dcdcea88b828\" (UID: \"28e9a69e-421c-41d9-b00d-dcdcea88b828\") " Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.867700 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.867731 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.867745 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m4jvr\" (UniqueName: \"kubernetes.io/projected/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-kube-api-access-m4jvr\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.867759 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.870550 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28e9a69e-421c-41d9-b00d-dcdcea88b828-kube-api-access-56rf2" (OuterVolumeSpecName: "kube-api-access-56rf2") pod "28e9a69e-421c-41d9-b00d-dcdcea88b828" (UID: "28e9a69e-421c-41d9-b00d-dcdcea88b828"). InnerVolumeSpecName "kube-api-access-56rf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.873273 4767 generic.go:334] "Generic (PLEG): container finished" podID="28e9a69e-421c-41d9-b00d-dcdcea88b828" containerID="f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d" exitCode=137 Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.873359 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.873396 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"28e9a69e-421c-41d9-b00d-dcdcea88b828","Type":"ContainerDied","Data":"f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d"} Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.873464 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"28e9a69e-421c-41d9-b00d-dcdcea88b828","Type":"ContainerDied","Data":"8d4fd1ac2f11e38b611980d762b5f816c4d330f3923582c20ebb1c04eb231828"} Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.873489 4767 scope.go:117] "RemoveContainer" containerID="f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.880412 4767 generic.go:334] "Generic (PLEG): container finished" podID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerID="adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406" exitCode=137 Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.880478 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.880480 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8","Type":"ContainerDied","Data":"adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406"} Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.880668 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8","Type":"ContainerDied","Data":"fc7ebda56fa089a4d90d3e7efbc7021abdd31e61e3b292761bb444d6135743ba"} Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.889104 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-config-data" (OuterVolumeSpecName: "config-data") pod "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" (UID: "8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.901564 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28e9a69e-421c-41d9-b00d-dcdcea88b828-config-data" (OuterVolumeSpecName: "config-data") pod "28e9a69e-421c-41d9-b00d-dcdcea88b828" (UID: "28e9a69e-421c-41d9-b00d-dcdcea88b828"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.903608 4767 scope.go:117] "RemoveContainer" containerID="f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d" Jan 28 18:53:34 crc kubenswrapper[4767]: E0128 18:53:34.904355 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d\": container with ID starting with f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d not found: ID does not exist" containerID="f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.904413 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d"} err="failed to get container status \"f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d\": rpc error: code = NotFound desc = could not find container \"f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d\": container with ID starting with f8dbc2176ff67420147c50741a06ec74b22aea9e690f976a374eebfd617f830d not found: ID does not exist" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.904449 4767 scope.go:117] "RemoveContainer" containerID="adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.904630 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/28e9a69e-421c-41d9-b00d-dcdcea88b828-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "28e9a69e-421c-41d9-b00d-dcdcea88b828" (UID: "28e9a69e-421c-41d9-b00d-dcdcea88b828"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.943343 4767 scope.go:117] "RemoveContainer" containerID="74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.964355 4767 scope.go:117] "RemoveContainer" containerID="dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.970828 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/28e9a69e-421c-41d9-b00d-dcdcea88b828-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.970865 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-56rf2\" (UniqueName: \"kubernetes.io/projected/28e9a69e-421c-41d9-b00d-dcdcea88b828-kube-api-access-56rf2\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.970878 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.970890 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/28e9a69e-421c-41d9-b00d-dcdcea88b828-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:34 crc kubenswrapper[4767]: I0128 18:53:34.987842 4767 scope.go:117] "RemoveContainer" containerID="4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.015314 4767 scope.go:117] "RemoveContainer" containerID="adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406" Jan 28 18:53:35 crc kubenswrapper[4767]: E0128 18:53:35.016029 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406\": container with ID starting with adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406 not found: ID does not exist" containerID="adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.016112 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406"} err="failed to get container status \"adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406\": rpc error: code = NotFound desc = could not find container \"adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406\": container with ID starting with adc90cafce248b7f2f561f995a498e082a296cd03815c1cc904144a259ae1406 not found: ID does not exist" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.016183 4767 scope.go:117] "RemoveContainer" containerID="74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c" Jan 28 18:53:35 crc kubenswrapper[4767]: E0128 18:53:35.016545 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c\": container with ID starting with 74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c not found: ID does not exist" containerID="74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c" Jan 28 
18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.016576 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c"} err="failed to get container status \"74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c\": rpc error: code = NotFound desc = could not find container \"74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c\": container with ID starting with 74222e3dda0bc820f5d53c44f9aedba0cb62664496a8cd8ff58d3171b455b32c not found: ID does not exist" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.016595 4767 scope.go:117] "RemoveContainer" containerID="dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec" Jan 28 18:53:35 crc kubenswrapper[4767]: E0128 18:53:35.016979 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec\": container with ID starting with dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec not found: ID does not exist" containerID="dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.017015 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec"} err="failed to get container status \"dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec\": rpc error: code = NotFound desc = could not find container \"dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec\": container with ID starting with dda9a52462d85bce764284ec58fb3f6f498cde5a6644f29778b3dad68968a5ec not found: ID does not exist" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.017037 4767 scope.go:117] "RemoveContainer" containerID="4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95" Jan 28 18:53:35 crc kubenswrapper[4767]: E0128 18:53:35.018622 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95\": container with ID starting with 4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95 not found: ID does not exist" containerID="4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.018664 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95"} err="failed to get container status \"4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95\": rpc error: code = NotFound desc = could not find container \"4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95\": container with ID starting with 4832795f54874756d4aacb51cb7b7a301efeb0113abb3579769ca95ecdde6d95 not found: ID does not exist" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.230230 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.258619 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.294469 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:53:35 crc 
kubenswrapper[4767]: I0128 18:53:35.311392 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.331277 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 18:53:35 crc kubenswrapper[4767]: E0128 18:53:35.332102 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28e9a69e-421c-41d9-b00d-dcdcea88b828" containerName="nova-cell1-novncproxy-novncproxy" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.332128 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="28e9a69e-421c-41d9-b00d-dcdcea88b828" containerName="nova-cell1-novncproxy-novncproxy" Jan 28 18:53:35 crc kubenswrapper[4767]: E0128 18:53:35.332171 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="ceilometer-central-agent" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.332180 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="ceilometer-central-agent" Jan 28 18:53:35 crc kubenswrapper[4767]: E0128 18:53:35.332190 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="ceilometer-notification-agent" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.332197 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="ceilometer-notification-agent" Jan 28 18:53:35 crc kubenswrapper[4767]: E0128 18:53:35.332228 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="proxy-httpd" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.332235 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="proxy-httpd" Jan 28 18:53:35 crc kubenswrapper[4767]: E0128 18:53:35.332265 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="sg-core" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.332273 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="sg-core" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.332481 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="ceilometer-notification-agent" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.332502 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="sg-core" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.332525 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="ceilometer-central-agent" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.332541 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" containerName="proxy-httpd" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.332552 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="28e9a69e-421c-41d9-b00d-dcdcea88b828" containerName="nova-cell1-novncproxy-novncproxy" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.333515 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.335986 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.336544 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.336676 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.348863 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.365427 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.368808 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.372897 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.373502 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.383969 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.488123 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.488176 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/af20411f-dff0-46d6-9405-7e3f4b28f309-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.488228 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af20411f-dff0-46d6-9405-7e3f4b28f309-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.488271 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.488301 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvzzf\" (UniqueName: \"kubernetes.io/projected/c67e5c73-3338-4eff-b207-79826ea0a546-kube-api-access-rvzzf\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: 
I0128 18:53:35.488566 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-config-data\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.488668 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-scripts\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.488684 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpffg\" (UniqueName: \"kubernetes.io/projected/af20411f-dff0-46d6-9405-7e3f4b28f309-kube-api-access-bpffg\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.488735 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/af20411f-dff0-46d6-9405-7e3f4b28f309-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.488767 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af20411f-dff0-46d6-9405-7e3f4b28f309-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.488823 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c67e5c73-3338-4eff-b207-79826ea0a546-run-httpd\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.488838 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c67e5c73-3338-4eff-b207-79826ea0a546-log-httpd\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.591309 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-scripts\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.591352 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpffg\" (UniqueName: \"kubernetes.io/projected/af20411f-dff0-46d6-9405-7e3f4b28f309-kube-api-access-bpffg\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.591394 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/af20411f-dff0-46d6-9405-7e3f4b28f309-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.591423 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af20411f-dff0-46d6-9405-7e3f4b28f309-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.591465 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c67e5c73-3338-4eff-b207-79826ea0a546-run-httpd\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.591483 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c67e5c73-3338-4eff-b207-79826ea0a546-log-httpd\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.591523 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.591546 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/af20411f-dff0-46d6-9405-7e3f4b28f309-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.591566 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af20411f-dff0-46d6-9405-7e3f4b28f309-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.591599 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.591622 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvzzf\" (UniqueName: \"kubernetes.io/projected/c67e5c73-3338-4eff-b207-79826ea0a546-kube-api-access-rvzzf\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.591642 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-config-data\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.593343 4767 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c67e5c73-3338-4eff-b207-79826ea0a546-log-httpd\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.598183 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c67e5c73-3338-4eff-b207-79826ea0a546-run-httpd\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.600698 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-scripts\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.610967 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/af20411f-dff0-46d6-9405-7e3f4b28f309-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.615768 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/af20411f-dff0-46d6-9405-7e3f4b28f309-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.620943 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.622039 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-config-data\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.622050 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/af20411f-dff0-46d6-9405-7e3f4b28f309-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.625442 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpffg\" (UniqueName: \"kubernetes.io/projected/af20411f-dff0-46d6-9405-7e3f4b28f309-kube-api-access-bpffg\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.625592 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvzzf\" (UniqueName: \"kubernetes.io/projected/c67e5c73-3338-4eff-b207-79826ea0a546-kube-api-access-rvzzf\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: 
I0128 18:53:35.629861 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/af20411f-dff0-46d6-9405-7e3f4b28f309-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"af20411f-dff0-46d6-9405-7e3f4b28f309\") " pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.654047 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.778676 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.863307 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.876698 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.899265 4767 generic.go:334] "Generic (PLEG): container finished" podID="7b762052-77b4-487b-9dfc-479e0478cd58" containerID="4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d" exitCode=137 Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.899386 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7b762052-77b4-487b-9dfc-479e0478cd58","Type":"ContainerDied","Data":"4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d"} Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.899424 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7b762052-77b4-487b-9dfc-479e0478cd58","Type":"ContainerDied","Data":"761688b15b3a8dd6a74d6057d536368c53a0d4c22c0232420cfbf6fa1109b6df"} Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.899447 4767 scope.go:117] "RemoveContainer" containerID="4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.899607 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.901497 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7cs8f\" (UniqueName: \"kubernetes.io/projected/7b762052-77b4-487b-9dfc-479e0478cd58-kube-api-access-7cs8f\") pod \"7b762052-77b4-487b-9dfc-479e0478cd58\" (UID: \"7b762052-77b4-487b-9dfc-479e0478cd58\") " Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.901696 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b762052-77b4-487b-9dfc-479e0478cd58-config-data\") pod \"7b762052-77b4-487b-9dfc-479e0478cd58\" (UID: \"7b762052-77b4-487b-9dfc-479e0478cd58\") " Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.901849 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b762052-77b4-487b-9dfc-479e0478cd58-combined-ca-bundle\") pod \"7b762052-77b4-487b-9dfc-479e0478cd58\" (UID: \"7b762052-77b4-487b-9dfc-479e0478cd58\") " Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.912483 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b762052-77b4-487b-9dfc-479e0478cd58-kube-api-access-7cs8f" (OuterVolumeSpecName: "kube-api-access-7cs8f") pod "7b762052-77b4-487b-9dfc-479e0478cd58" (UID: "7b762052-77b4-487b-9dfc-479e0478cd58"). InnerVolumeSpecName "kube-api-access-7cs8f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.943851 4767 scope.go:117] "RemoveContainer" containerID="4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d" Jan 28 18:53:35 crc kubenswrapper[4767]: E0128 18:53:35.944440 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d\": container with ID starting with 4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d not found: ID does not exist" containerID="4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.944513 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d"} err="failed to get container status \"4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d\": rpc error: code = NotFound desc = could not find container \"4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d\": container with ID starting with 4fd7044dac3d9530404598e4c11c60a07461afed959bbfb2f2cf276e9b5f0a9d not found: ID does not exist" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.948234 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b762052-77b4-487b-9dfc-479e0478cd58-config-data" (OuterVolumeSpecName: "config-data") pod "7b762052-77b4-487b-9dfc-479e0478cd58" (UID: "7b762052-77b4-487b-9dfc-479e0478cd58"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:35 crc kubenswrapper[4767]: I0128 18:53:35.950177 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7b762052-77b4-487b-9dfc-479e0478cd58-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7b762052-77b4-487b-9dfc-479e0478cd58" (UID: "7b762052-77b4-487b-9dfc-479e0478cd58"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.006137 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7b762052-77b4-487b-9dfc-479e0478cd58-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.006180 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7cs8f\" (UniqueName: \"kubernetes.io/projected/7b762052-77b4-487b-9dfc-479e0478cd58-kube-api-access-7cs8f\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.006193 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7b762052-77b4-487b-9dfc-479e0478cd58-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.257450 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.275013 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.295446 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:53:36 crc kubenswrapper[4767]: E0128 18:53:36.296115 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b762052-77b4-487b-9dfc-479e0478cd58" containerName="nova-scheduler-scheduler" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.296134 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b762052-77b4-487b-9dfc-479e0478cd58" containerName="nova-scheduler-scheduler" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.296450 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b762052-77b4-487b-9dfc-479e0478cd58" containerName="nova-scheduler-scheduler" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.297505 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.306487 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.316512 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.412091 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 28 18:53:36 crc kubenswrapper[4767]: W0128 18:53:36.414928 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaf20411f_dff0_46d6_9405_7e3f4b28f309.slice/crio-d998730b11c6a535bbdef4c05d5bf5b2d711a47d7dd220dacbc97b54652243bc WatchSource:0}: Error finding container d998730b11c6a535bbdef4c05d5bf5b2d711a47d7dd220dacbc97b54652243bc: Status 404 returned error can't find the container with id d998730b11c6a535bbdef4c05d5bf5b2d711a47d7dd220dacbc97b54652243bc Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.417452 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-config-data\") pod \"nova-scheduler-0\" (UID: \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\") " pod="openstack/nova-scheduler-0" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.417533 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\") " pod="openstack/nova-scheduler-0" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.417688 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9kxt5\" (UniqueName: \"kubernetes.io/projected/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-kube-api-access-9kxt5\") pod \"nova-scheduler-0\" (UID: \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\") " pod="openstack/nova-scheduler-0" Jan 28 18:53:36 crc kubenswrapper[4767]: W0128 18:53:36.499664 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc67e5c73_3338_4eff_b207_79826ea0a546.slice/crio-9455bacafd4d0b9a7e7751795ff05391b811ec429bf80c9e0f3a644a86323849 WatchSource:0}: Error finding container 9455bacafd4d0b9a7e7751795ff05391b811ec429bf80c9e0f3a644a86323849: Status 404 returned error can't find the container with id 9455bacafd4d0b9a7e7751795ff05391b811ec429bf80c9e0f3a644a86323849 Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.500545 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.504019 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.519888 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\") " pod="openstack/nova-scheduler-0" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.520747 4767 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9kxt5\" (UniqueName: \"kubernetes.io/projected/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-kube-api-access-9kxt5\") pod \"nova-scheduler-0\" (UID: \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\") " pod="openstack/nova-scheduler-0" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.520816 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-config-data\") pod \"nova-scheduler-0\" (UID: \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\") " pod="openstack/nova-scheduler-0" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.527616 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\") " pod="openstack/nova-scheduler-0" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.530102 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-config-data\") pod \"nova-scheduler-0\" (UID: \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\") " pod="openstack/nova-scheduler-0" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.544579 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9kxt5\" (UniqueName: \"kubernetes.io/projected/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-kube-api-access-9kxt5\") pod \"nova-scheduler-0\" (UID: \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\") " pod="openstack/nova-scheduler-0" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.669403 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.809994 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28e9a69e-421c-41d9-b00d-dcdcea88b828" path="/var/lib/kubelet/pods/28e9a69e-421c-41d9-b00d-dcdcea88b828/volumes" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.810883 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b762052-77b4-487b-9dfc-479e0478cd58" path="/var/lib/kubelet/pods/7b762052-77b4-487b-9dfc-479e0478cd58/volumes" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.811581 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8" path="/var/lib/kubelet/pods/8f8797cb-7ce0-4be7-8dc7-cbbf2cbf4ec8/volumes" Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.932018 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"af20411f-dff0-46d6-9405-7e3f4b28f309","Type":"ContainerStarted","Data":"4e5aadf5f9dac8742510ff55ef56799333a0e78b0de1d7b36a4ac6134ffa7baa"} Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.932696 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"af20411f-dff0-46d6-9405-7e3f4b28f309","Type":"ContainerStarted","Data":"d998730b11c6a535bbdef4c05d5bf5b2d711a47d7dd220dacbc97b54652243bc"} Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.935155 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c67e5c73-3338-4eff-b207-79826ea0a546","Type":"ContainerStarted","Data":"9455bacafd4d0b9a7e7751795ff05391b811ec429bf80c9e0f3a644a86323849"} Jan 28 18:53:36 crc kubenswrapper[4767]: I0128 18:53:36.968773 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.968741863 podStartE2EDuration="1.968741863s" podCreationTimestamp="2026-01-28 18:53:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:36.958147921 +0000 UTC m=+1422.922330795" watchObservedRunningTime="2026-01-28 18:53:36.968741863 +0000 UTC m=+1422.932924737" Jan 28 18:53:37 crc kubenswrapper[4767]: W0128 18:53:37.176465 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0be7e2a2_7a75_40f6_b9dd_2b56e4f28ecd.slice/crio-c6a87bb4082cb29ae36dea13312afb207631f59492ea471cb174b87603e5d730 WatchSource:0}: Error finding container c6a87bb4082cb29ae36dea13312afb207631f59492ea471cb174b87603e5d730: Status 404 returned error can't find the container with id c6a87bb4082cb29ae36dea13312afb207631f59492ea471cb174b87603e5d730 Jan 28 18:53:37 crc kubenswrapper[4767]: I0128 18:53:37.177664 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:53:37 crc kubenswrapper[4767]: I0128 18:53:37.475524 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 18:53:37 crc kubenswrapper[4767]: I0128 18:53:37.476005 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 18:53:37 crc kubenswrapper[4767]: I0128 18:53:37.948960 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd","Type":"ContainerStarted","Data":"1aed7cdbef53920b80c271f482683aa11afd0e45efb68b188b76030be60af7e1"} Jan 28 18:53:37 crc kubenswrapper[4767]: I0128 18:53:37.949018 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd","Type":"ContainerStarted","Data":"c6a87bb4082cb29ae36dea13312afb207631f59492ea471cb174b87603e5d730"} Jan 28 18:53:37 crc kubenswrapper[4767]: I0128 18:53:37.952344 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c67e5c73-3338-4eff-b207-79826ea0a546","Type":"ContainerStarted","Data":"06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904"} Jan 28 18:53:37 crc kubenswrapper[4767]: I0128 18:53:37.976158 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.9761321120000002 podStartE2EDuration="1.976132112s" podCreationTimestamp="2026-01-28 18:53:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:37.968558395 +0000 UTC m=+1423.932741269" watchObservedRunningTime="2026-01-28 18:53:37.976132112 +0000 UTC m=+1423.940314986" Jan 28 18:53:38 crc kubenswrapper[4767]: I0128 18:53:38.557415 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.205:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:53:38 crc kubenswrapper[4767]: I0128 18:53:38.557419 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.205:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 28 18:53:38 crc kubenswrapper[4767]: I0128 18:53:38.964761 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c67e5c73-3338-4eff-b207-79826ea0a546","Type":"ContainerStarted","Data":"b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916"} Jan 28 18:53:39 crc kubenswrapper[4767]: I0128 18:53:39.978685 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c67e5c73-3338-4eff-b207-79826ea0a546","Type":"ContainerStarted","Data":"e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f"} Jan 28 18:53:40 crc kubenswrapper[4767]: I0128 18:53:40.080018 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 28 18:53:40 crc kubenswrapper[4767]: I0128 18:53:40.081462 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 28 18:53:40 crc kubenswrapper[4767]: I0128 18:53:40.089121 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 28 18:53:40 crc kubenswrapper[4767]: I0128 18:53:40.863628 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 28 18:53:41 crc kubenswrapper[4767]: I0128 18:53:41.005875 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 28 18:53:41 crc kubenswrapper[4767]: I0128 18:53:41.669693 4767 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 28 18:53:42 crc kubenswrapper[4767]: I0128 18:53:42.012961 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c67e5c73-3338-4eff-b207-79826ea0a546","Type":"ContainerStarted","Data":"fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83"} Jan 28 18:53:42 crc kubenswrapper[4767]: I0128 18:53:42.040175 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.123675126 podStartE2EDuration="7.040153372s" podCreationTimestamp="2026-01-28 18:53:35 +0000 UTC" firstStartedPulling="2026-01-28 18:53:36.503552882 +0000 UTC m=+1422.467735756" lastFinishedPulling="2026-01-28 18:53:41.420031128 +0000 UTC m=+1427.384214002" observedRunningTime="2026-01-28 18:53:42.039264774 +0000 UTC m=+1428.003447648" watchObservedRunningTime="2026-01-28 18:53:42.040153372 +0000 UTC m=+1428.004336246" Jan 28 18:53:43 crc kubenswrapper[4767]: I0128 18:53:43.028176 4767 generic.go:334] "Generic (PLEG): container finished" podID="bd968858-329e-4d57-8cd7-364a5e852eea" containerID="e073f7a0342841c72e27008fe373a0b937c630b20a94bc7a967404b5f9f4202f" exitCode=0 Jan 28 18:53:43 crc kubenswrapper[4767]: I0128 18:53:43.028664 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-t8vsr" event={"ID":"bd968858-329e-4d57-8cd7-364a5e852eea","Type":"ContainerDied","Data":"e073f7a0342841c72e27008fe373a0b937c630b20a94bc7a967404b5f9f4202f"} Jan 28 18:53:43 crc kubenswrapper[4767]: I0128 18:53:43.029797 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.433680 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.529415 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rsr9b\" (UniqueName: \"kubernetes.io/projected/bd968858-329e-4d57-8cd7-364a5e852eea-kube-api-access-rsr9b\") pod \"bd968858-329e-4d57-8cd7-364a5e852eea\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.529518 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-config-data\") pod \"bd968858-329e-4d57-8cd7-364a5e852eea\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.529640 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-scripts\") pod \"bd968858-329e-4d57-8cd7-364a5e852eea\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.529800 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-combined-ca-bundle\") pod \"bd968858-329e-4d57-8cd7-364a5e852eea\" (UID: \"bd968858-329e-4d57-8cd7-364a5e852eea\") " Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.538086 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd968858-329e-4d57-8cd7-364a5e852eea-kube-api-access-rsr9b" (OuterVolumeSpecName: "kube-api-access-rsr9b") pod "bd968858-329e-4d57-8cd7-364a5e852eea" (UID: "bd968858-329e-4d57-8cd7-364a5e852eea"). InnerVolumeSpecName "kube-api-access-rsr9b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.538611 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-scripts" (OuterVolumeSpecName: "scripts") pod "bd968858-329e-4d57-8cd7-364a5e852eea" (UID: "bd968858-329e-4d57-8cd7-364a5e852eea"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.564851 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-config-data" (OuterVolumeSpecName: "config-data") pod "bd968858-329e-4d57-8cd7-364a5e852eea" (UID: "bd968858-329e-4d57-8cd7-364a5e852eea"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.573838 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd968858-329e-4d57-8cd7-364a5e852eea" (UID: "bd968858-329e-4d57-8cd7-364a5e852eea"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.632317 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.632359 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rsr9b\" (UniqueName: \"kubernetes.io/projected/bd968858-329e-4d57-8cd7-364a5e852eea-kube-api-access-rsr9b\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.632389 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:44 crc kubenswrapper[4767]: I0128 18:53:44.632398 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd968858-329e-4d57-8cd7-364a5e852eea-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.055125 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-t8vsr" event={"ID":"bd968858-329e-4d57-8cd7-364a5e852eea","Type":"ContainerDied","Data":"022d4cc40e71f52a4865efd9da768743d6a198fc73e69d953171c2b293add2a7"} Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.055460 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="022d4cc40e71f52a4865efd9da768743d6a198fc73e69d953171c2b293add2a7" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.055280 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-t8vsr" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.150915 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 28 18:53:45 crc kubenswrapper[4767]: E0128 18:53:45.152138 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd968858-329e-4d57-8cd7-364a5e852eea" containerName="nova-cell1-conductor-db-sync" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.152161 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd968858-329e-4d57-8cd7-364a5e852eea" containerName="nova-cell1-conductor-db-sync" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.152397 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd968858-329e-4d57-8cd7-364a5e852eea" containerName="nova-cell1-conductor-db-sync" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.174710 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.177076 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.182362 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.249110 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21f835c6-b7b5-413c-8f9f-e349583860fc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"21f835c6-b7b5-413c-8f9f-e349583860fc\") " pod="openstack/nova-cell1-conductor-0" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.249195 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xwtqt\" (UniqueName: \"kubernetes.io/projected/21f835c6-b7b5-413c-8f9f-e349583860fc-kube-api-access-xwtqt\") pod \"nova-cell1-conductor-0\" (UID: \"21f835c6-b7b5-413c-8f9f-e349583860fc\") " pod="openstack/nova-cell1-conductor-0" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.249540 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21f835c6-b7b5-413c-8f9f-e349583860fc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"21f835c6-b7b5-413c-8f9f-e349583860fc\") " pod="openstack/nova-cell1-conductor-0" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.352777 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21f835c6-b7b5-413c-8f9f-e349583860fc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"21f835c6-b7b5-413c-8f9f-e349583860fc\") " pod="openstack/nova-cell1-conductor-0" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.352836 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xwtqt\" (UniqueName: \"kubernetes.io/projected/21f835c6-b7b5-413c-8f9f-e349583860fc-kube-api-access-xwtqt\") pod \"nova-cell1-conductor-0\" (UID: \"21f835c6-b7b5-413c-8f9f-e349583860fc\") " pod="openstack/nova-cell1-conductor-0" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.352905 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21f835c6-b7b5-413c-8f9f-e349583860fc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"21f835c6-b7b5-413c-8f9f-e349583860fc\") " pod="openstack/nova-cell1-conductor-0" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.361882 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/21f835c6-b7b5-413c-8f9f-e349583860fc-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"21f835c6-b7b5-413c-8f9f-e349583860fc\") " pod="openstack/nova-cell1-conductor-0" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.365989 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/21f835c6-b7b5-413c-8f9f-e349583860fc-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"21f835c6-b7b5-413c-8f9f-e349583860fc\") " pod="openstack/nova-cell1-conductor-0" Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.374058 4767 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xwtqt\" (UniqueName: \"kubernetes.io/projected/21f835c6-b7b5-413c-8f9f-e349583860fc-kube-api-access-xwtqt\") pod \"nova-cell1-conductor-0\" (UID: \"21f835c6-b7b5-413c-8f9f-e349583860fc\") " pod="openstack/nova-cell1-conductor-0"
Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.454969 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.455109 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.455172 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp"
Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.457574 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6c4deef50f94ebc84f432ab68abee6b83fa4675bb3fde9668560bfed495791e5"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.457661 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://6c4deef50f94ebc84f432ab68abee6b83fa4675bb3fde9668560bfed495791e5" gracePeriod=600
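
The machine-config-daemon records above show the liveness path rather than the startup path: patch_prober and prober report the failed GET on 127.0.0.1:8798/health, the SyncLoop marks the container unhealthy, and kuberuntime kills it for restart with gracePeriod=600. That grace period comes from the pod, so presumably terminationGracePeriodSeconds is 600; a minimal sketch under that assumption, with the probe endpoint taken from the log and the threshold values assumed:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	grace := int64(600) // assumed from gracePeriod=600 in the kill record
	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			TerminationGracePeriodSeconds: &grace,
			Containers: []corev1.Container{{
				Name: "machine-config-daemon",
				LivenessProbe: &corev1.Probe{
					ProbeHandler: corev1.ProbeHandler{
						HTTPGet: &corev1.HTTPGetAction{
							Host: "127.0.0.1", // assumed: localhost probe of a host-network pod
							Path: "/health",
							Port: intstr.FromInt(8798),
						},
					},
					FailureThreshold: 3, // assumed
				},
			}},
		},
	}
	fmt.Printf("liveness failure restarts with %ds grace\n",
		*pod.Spec.TerminationGracePeriodSeconds)
}

The PLEG events just below complete the restart: ContainerDied for 6c4deef5..., ContainerStarted for its replacement, then a RemoveContainer for an older container ID that garbage-collects a previous restart.
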
Jan 28 18:53:45 crc kubenswrapper[4767]: I0128 18:53:45.496465 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Jan 28 18:53:46 crc kubenswrapper[4767]: I0128 18:53:45.864257 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0"
Jan 28 18:53:46 crc kubenswrapper[4767]: I0128 18:53:45.890588 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0"
Jan 28 18:53:46 crc kubenswrapper[4767]: I0128 18:53:46.013730 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 28 18:53:46 crc kubenswrapper[4767]: W0128 18:53:46.020326 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod21f835c6_b7b5_413c_8f9f_e349583860fc.slice/crio-f1a3dcaa11948f517bd6e020f7c1edfb81aaef2f6a00ae7be3abb3351a75079c WatchSource:0}: Error finding container f1a3dcaa11948f517bd6e020f7c1edfb81aaef2f6a00ae7be3abb3351a75079c: Status 404 returned error can't find the container with id f1a3dcaa11948f517bd6e020f7c1edfb81aaef2f6a00ae7be3abb3351a75079c
Jan 28 18:53:46 crc kubenswrapper[4767]: I0128 18:53:46.066918 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"21f835c6-b7b5-413c-8f9f-e349583860fc","Type":"ContainerStarted","Data":"f1a3dcaa11948f517bd6e020f7c1edfb81aaef2f6a00ae7be3abb3351a75079c"}
Jan 28 18:53:46 crc kubenswrapper[4767]: I0128 18:53:46.073098 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="6c4deef50f94ebc84f432ab68abee6b83fa4675bb3fde9668560bfed495791e5" exitCode=0
Jan 28 18:53:46 crc kubenswrapper[4767]: I0128 18:53:46.073145 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"6c4deef50f94ebc84f432ab68abee6b83fa4675bb3fde9668560bfed495791e5"}
Jan 28 18:53:46 crc kubenswrapper[4767]: I0128 18:53:46.073231 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152"}
Jan 28 18:53:46 crc kubenswrapper[4767]: I0128 18:53:46.073270 4767 scope.go:117] "RemoveContainer" containerID="efe49b1f3887d0c6654c94b4c1818b6bf7a2508307ca13c8afacae337561c559"
Jan 28 18:53:46 crc kubenswrapper[4767]: I0128 18:53:46.096879 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0"
Jan 28 18:53:46 crc kubenswrapper[4767]: I0128 18:53:46.670324 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 28 18:53:46 crc kubenswrapper[4767]: I0128 18:53:46.703450 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.089092 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"21f835c6-b7b5-413c-8f9f-e349583860fc","Type":"ContainerStarted","Data":"ceec7f9a8ba8c74e589bb3cbca6816fc7d5f135d36cd2c4a945e676808057e19"}
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.091707 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Jan 28 18:53:47 crc
kubenswrapper[4767]: I0128 18:53:47.110700 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.110682003 podStartE2EDuration="2.110682003s" podCreationTimestamp="2026-01-28 18:53:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:47.109744723 +0000 UTC m=+1433.073927597" watchObservedRunningTime="2026-01-28 18:53:47.110682003 +0000 UTC m=+1433.074864877" Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.133228 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.478371 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.479029 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.479475 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.479505 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.483581 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.484084 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.707250 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-bbjv2"] Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.710237 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.736307 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-bbjv2"]
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.823359 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkzzs\" (UniqueName: \"kubernetes.io/projected/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-kube-api-access-hkzzs\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.823509 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-dns-svc\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.823706 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-dns-swift-storage-0\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.823924 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-ovsdbserver-sb\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.823986 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-config\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.825703 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-ovsdbserver-nb\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.929546 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-dns-swift-storage-0\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.929721 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-ovsdbserver-sb\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
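
The reconciler records above walk every volume of dnsmasq-dns-f84f9ccf-bbjv2 through operationExecutor.VerifyControllerAttachedVolume and into MountVolume; the matching "MountVolume.SetUp succeeded" confirmations follow just below. All of these volumes are ConfigMap-backed except kube-api-access-hkzzs, the projected service-account token the API server injects automatically, which is why its UniqueName sits under kubernetes.io/projected rather than kubernetes.io/configmap. A sketch of how one such volume would be declared; the ConfigMap name matching the volume name and the mount path are assumptions:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Hypothetical declaration of the "dns-svc" volume; the other named
	// volumes (config, ovsdbserver-nb/sb, dns-swift-storage-0) would follow
	// the same ConfigMap pattern.
	vol := corev1.Volume{
		Name: "dns-svc",
		VolumeSource: corev1.VolumeSource{
			ConfigMap: &corev1.ConfigMapVolumeSource{
				LocalObjectReference: corev1.LocalObjectReference{Name: "dns-svc"}, // assumed ConfigMap name
			},
		},
	}
	mnt := corev1.VolumeMount{Name: "dns-svc", MountPath: "/etc/dnsmasq.d", ReadOnly: true} // path assumed
	fmt.Printf("volume %q mounted at %s\n", vol.Name, mnt.MountPath)
}
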
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.929759 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-config\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.929791 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-ovsdbserver-nb\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.929834 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkzzs\" (UniqueName: \"kubernetes.io/projected/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-kube-api-access-hkzzs\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.929876 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-dns-svc\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.931081 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-dns-swift-storage-0\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.931101 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-ovsdbserver-sb\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.931946 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-config\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.932076 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-dns-svc\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.932641 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-ovsdbserver-nb\") pod \"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2"
Jan 28 18:53:47 crc kubenswrapper[4767]: I0128 18:53:47.956050 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkzzs\" (UniqueName: \"kubernetes.io/projected/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-kube-api-access-hkzzs\") pod
\"dnsmasq-dns-f84f9ccf-bbjv2\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" Jan 28 18:53:48 crc kubenswrapper[4767]: I0128 18:53:48.057773 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" Jan 28 18:53:48 crc kubenswrapper[4767]: I0128 18:53:48.677364 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-bbjv2"] Jan 28 18:53:48 crc kubenswrapper[4767]: W0128 18:53:48.693721 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ba5c851_cdd7_40f6_b48e_885ca6a6c95a.slice/crio-5a7745195e634cac4777a6b6bb2c3629100d57b41551479f42aea4cce6206208 WatchSource:0}: Error finding container 5a7745195e634cac4777a6b6bb2c3629100d57b41551479f42aea4cce6206208: Status 404 returned error can't find the container with id 5a7745195e634cac4777a6b6bb2c3629100d57b41551479f42aea4cce6206208 Jan 28 18:53:49 crc kubenswrapper[4767]: I0128 18:53:49.137749 4767 generic.go:334] "Generic (PLEG): container finished" podID="3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" containerID="18466654814e5319f7e1f0c3d7eb680c837d446a0289f97230ad5195aeabe787" exitCode=0 Jan 28 18:53:49 crc kubenswrapper[4767]: I0128 18:53:49.137835 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" event={"ID":"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a","Type":"ContainerDied","Data":"18466654814e5319f7e1f0c3d7eb680c837d446a0289f97230ad5195aeabe787"} Jan 28 18:53:49 crc kubenswrapper[4767]: I0128 18:53:49.138228 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" event={"ID":"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a","Type":"ContainerStarted","Data":"5a7745195e634cac4777a6b6bb2c3629100d57b41551479f42aea4cce6206208"} Jan 28 18:53:50 crc kubenswrapper[4767]: I0128 18:53:50.150108 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" event={"ID":"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a","Type":"ContainerStarted","Data":"faaabe6fdad6ea7947adb493a2dba4d21a6361a315a5cef9a9eb3d3dcaf75f0c"} Jan 28 18:53:50 crc kubenswrapper[4767]: I0128 18:53:50.150849 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" Jan 28 18:53:50 crc kubenswrapper[4767]: I0128 18:53:50.189500 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" podStartSLOduration=3.189473729 podStartE2EDuration="3.189473729s" podCreationTimestamp="2026-01-28 18:53:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:50.180974272 +0000 UTC m=+1436.145157146" watchObservedRunningTime="2026-01-28 18:53:50.189473729 +0000 UTC m=+1436.153656603" Jan 28 18:53:50 crc kubenswrapper[4767]: I0128 18:53:50.595937 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:53:50 crc kubenswrapper[4767]: I0128 18:53:50.596683 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="ceilometer-central-agent" containerID="cri-o://06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904" gracePeriod=30 Jan 28 18:53:50 crc kubenswrapper[4767]: I0128 18:53:50.597313 4767 kuberuntime_container.go:808] "Killing 
container with a grace period" pod="openstack/ceilometer-0" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="proxy-httpd" containerID="cri-o://fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83" gracePeriod=30 Jan 28 18:53:50 crc kubenswrapper[4767]: I0128 18:53:50.597365 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="sg-core" containerID="cri-o://e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f" gracePeriod=30 Jan 28 18:53:50 crc kubenswrapper[4767]: I0128 18:53:50.597402 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="ceilometer-notification-agent" containerID="cri-o://b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916" gracePeriod=30 Jan 28 18:53:50 crc kubenswrapper[4767]: I0128 18:53:50.654567 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:50 crc kubenswrapper[4767]: I0128 18:53:50.655033 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" containerName="nova-api-log" containerID="cri-o://9f89ddd0611806851b7ac80bd30fc64a7e2dd2eb8a70d8eebd9ef6e8a1c5b89a" gracePeriod=30 Jan 28 18:53:50 crc kubenswrapper[4767]: I0128 18:53:50.655433 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" containerName="nova-api-api" containerID="cri-o://db1ea5918cc4617957b9ae7a3998c0913b15c7a014c6b312bee16c30f2231584" gracePeriod=30 Jan 28 18:53:51 crc kubenswrapper[4767]: I0128 18:53:51.163858 4767 generic.go:334] "Generic (PLEG): container finished" podID="c67e5c73-3338-4eff-b207-79826ea0a546" containerID="fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83" exitCode=0 Jan 28 18:53:51 crc kubenswrapper[4767]: I0128 18:53:51.164328 4767 generic.go:334] "Generic (PLEG): container finished" podID="c67e5c73-3338-4eff-b207-79826ea0a546" containerID="e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f" exitCode=2 Jan 28 18:53:51 crc kubenswrapper[4767]: I0128 18:53:51.164346 4767 generic.go:334] "Generic (PLEG): container finished" podID="c67e5c73-3338-4eff-b207-79826ea0a546" containerID="06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904" exitCode=0 Jan 28 18:53:51 crc kubenswrapper[4767]: I0128 18:53:51.163896 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c67e5c73-3338-4eff-b207-79826ea0a546","Type":"ContainerDied","Data":"fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83"} Jan 28 18:53:51 crc kubenswrapper[4767]: I0128 18:53:51.164413 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c67e5c73-3338-4eff-b207-79826ea0a546","Type":"ContainerDied","Data":"e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f"} Jan 28 18:53:51 crc kubenswrapper[4767]: I0128 18:53:51.164431 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c67e5c73-3338-4eff-b207-79826ea0a546","Type":"ContainerDied","Data":"06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904"} Jan 28 18:53:51 crc kubenswrapper[4767]: I0128 18:53:51.168859 4767 generic.go:334] "Generic (PLEG): container finished" 
podID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" containerID="9f89ddd0611806851b7ac80bd30fc64a7e2dd2eb8a70d8eebd9ef6e8a1c5b89a" exitCode=143 Jan 28 18:53:51 crc kubenswrapper[4767]: I0128 18:53:51.168952 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"466ee90a-37b7-4b56-a87e-e1175cc31b3d","Type":"ContainerDied","Data":"9f89ddd0611806851b7ac80bd30fc64a7e2dd2eb8a70d8eebd9ef6e8a1c5b89a"} Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.144351 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.181987 4767 generic.go:334] "Generic (PLEG): container finished" podID="c67e5c73-3338-4eff-b207-79826ea0a546" containerID="b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916" exitCode=0 Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.182232 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c67e5c73-3338-4eff-b207-79826ea0a546","Type":"ContainerDied","Data":"b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916"} Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.182331 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"c67e5c73-3338-4eff-b207-79826ea0a546","Type":"ContainerDied","Data":"9455bacafd4d0b9a7e7751795ff05391b811ec429bf80c9e0f3a644a86323849"} Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.182356 4767 scope.go:117] "RemoveContainer" containerID="fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.182455 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.228315 4767 scope.go:117] "RemoveContainer" containerID="e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.242037 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c67e5c73-3338-4eff-b207-79826ea0a546-run-httpd\") pod \"c67e5c73-3338-4eff-b207-79826ea0a546\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.242320 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-combined-ca-bundle\") pod \"c67e5c73-3338-4eff-b207-79826ea0a546\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.242452 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-scripts\") pod \"c67e5c73-3338-4eff-b207-79826ea0a546\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.242488 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-config-data\") pod \"c67e5c73-3338-4eff-b207-79826ea0a546\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.242583 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/c67e5c73-3338-4eff-b207-79826ea0a546-log-httpd\") pod \"c67e5c73-3338-4eff-b207-79826ea0a546\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.242594 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c67e5c73-3338-4eff-b207-79826ea0a546-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "c67e5c73-3338-4eff-b207-79826ea0a546" (UID: "c67e5c73-3338-4eff-b207-79826ea0a546"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.242701 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rvzzf\" (UniqueName: \"kubernetes.io/projected/c67e5c73-3338-4eff-b207-79826ea0a546-kube-api-access-rvzzf\") pod \"c67e5c73-3338-4eff-b207-79826ea0a546\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.242763 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-sg-core-conf-yaml\") pod \"c67e5c73-3338-4eff-b207-79826ea0a546\" (UID: \"c67e5c73-3338-4eff-b207-79826ea0a546\") " Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.243329 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c67e5c73-3338-4eff-b207-79826ea0a546-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "c67e5c73-3338-4eff-b207-79826ea0a546" (UID: "c67e5c73-3338-4eff-b207-79826ea0a546"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.244348 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c67e5c73-3338-4eff-b207-79826ea0a546-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.244385 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/c67e5c73-3338-4eff-b207-79826ea0a546-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.261832 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-scripts" (OuterVolumeSpecName: "scripts") pod "c67e5c73-3338-4eff-b207-79826ea0a546" (UID: "c67e5c73-3338-4eff-b207-79826ea0a546"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.262945 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c67e5c73-3338-4eff-b207-79826ea0a546-kube-api-access-rvzzf" (OuterVolumeSpecName: "kube-api-access-rvzzf") pod "c67e5c73-3338-4eff-b207-79826ea0a546" (UID: "c67e5c73-3338-4eff-b207-79826ea0a546"). InnerVolumeSpecName "kube-api-access-rvzzf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.269150 4767 scope.go:117] "RemoveContainer" containerID="b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.286620 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "c67e5c73-3338-4eff-b207-79826ea0a546" (UID: "c67e5c73-3338-4eff-b207-79826ea0a546"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.347438 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.347937 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rvzzf\" (UniqueName: \"kubernetes.io/projected/c67e5c73-3338-4eff-b207-79826ea0a546-kube-api-access-rvzzf\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.348010 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.370260 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c67e5c73-3338-4eff-b207-79826ea0a546" (UID: "c67e5c73-3338-4eff-b207-79826ea0a546"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.408774 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-config-data" (OuterVolumeSpecName: "config-data") pod "c67e5c73-3338-4eff-b207-79826ea0a546" (UID: "c67e5c73-3338-4eff-b207-79826ea0a546"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.450015 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.450080 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c67e5c73-3338-4eff-b207-79826ea0a546-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.458699 4767 scope.go:117] "RemoveContainer" containerID="06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.497618 4767 scope.go:117] "RemoveContainer" containerID="fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83" Jan 28 18:53:52 crc kubenswrapper[4767]: E0128 18:53:52.498567 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83\": container with ID starting with fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83 not found: ID does not exist" containerID="fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.498643 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83"} err="failed to get container status \"fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83\": rpc error: code = NotFound desc = could not find container \"fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83\": container with ID starting with fdf1217be24de737db0be4a461c53cc7b12bda171fd50a40f2bbbe8ba1046a83 not found: ID does not exist" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.498690 4767 scope.go:117] "RemoveContainer" containerID="e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f" Jan 28 18:53:52 crc kubenswrapper[4767]: E0128 18:53:52.499328 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f\": container with ID starting with e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f not found: ID does not exist" containerID="e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.499387 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f"} err="failed to get container status \"e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f\": rpc error: code = NotFound desc = could not find container \"e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f\": container with ID starting with e5e55d8bb542df7aa863f258a21d5c007e85923a601ed74860d314a24434cc3f not found: ID does not exist" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.499437 4767 scope.go:117] "RemoveContainer" containerID="b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916" Jan 28 18:53:52 crc kubenswrapper[4767]: E0128 18:53:52.499885 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916\": container with ID starting with b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916 not found: ID does not exist" containerID="b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.499923 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916"} err="failed to get container status \"b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916\": rpc error: code = NotFound desc = could not find container \"b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916\": container with ID starting with b2273fdad16a96a8428ae741e34553572762fd9817275a485421ca0f67c62916 not found: ID does not exist"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.499948 4767 scope.go:117] "RemoveContainer" containerID="06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904"
Jan 28 18:53:52 crc kubenswrapper[4767]: E0128 18:53:52.500382 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904\": container with ID starting with 06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904 not found: ID does not exist" containerID="06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.500427 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904"} err="failed to get container status \"06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904\": rpc error: code = NotFound desc = could not find container \"06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904\": container with ID starting with 06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904 not found: ID does not exist"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.529463 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.544582 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.574269 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:53:52 crc kubenswrapper[4767]: E0128 18:53:52.575016 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="ceilometer-notification-agent"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.575045 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="ceilometer-notification-agent"
Jan 28 18:53:52 crc kubenswrapper[4767]: E0128 18:53:52.575067 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="proxy-httpd"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.575077 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="proxy-httpd"
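
The paired "ContainerStatus from runtime service failed" / "DeleteContainer returned error" records above are benign: by the time the kubelet re-checks a container it is removing, CRI-O has already deleted it, so the runtime answers with gRPC NotFound and the kubelet moves on. A sketch of the same check issued as an external CRI client; the CRI-O socket path is the usual default and an assumption here:

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/status"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
	// Dial CRI-O the way the kubelet's remote runtime client does; the
	// socket path is assumed, not taken from this log.
	conn, err := grpc.Dial("unix:///var/run/crio/crio.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Asking for a container the runtime has already deleted returns the
	// same gRPC NotFound that the log records during container removal.
	client := runtimeapi.NewRuntimeServiceClient(conn)
	_, err = client.ContainerStatus(ctx, &runtimeapi.ContainerStatusRequest{
		ContainerId: "06e7fb11ea7d29e967b09b294c05d8a128a151927b7012640d14b5c32d5bf904",
	})
	if status.Code(err) == codes.NotFound {
		fmt.Println("already gone; deletion can be treated as complete")
	}
}
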
Jan 28 18:53:52 crc kubenswrapper[4767]: E0128 18:53:52.575112 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="ceilometer-central-agent"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.575122 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="ceilometer-central-agent"
Jan 28 18:53:52 crc kubenswrapper[4767]: E0128 18:53:52.575150 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="sg-core"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.575157 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="sg-core"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.575552 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="ceilometer-central-agent"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.575580 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="ceilometer-notification-agent"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.575607 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="proxy-httpd"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.575631 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" containerName="sg-core"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.578359 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.582673 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.582984 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.604563 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.654755 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0afb05df-ac84-44f8-ac68-0bde2d1eea08-log-httpd\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.654831 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.655017 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nz7pg\" (UniqueName: \"kubernetes.io/projected/0afb05df-ac84-44f8-ac68-0bde2d1eea08-kube-api-access-nz7pg\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0"
Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.655142 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-scripts\") pod \"ceilometer-0\"
(UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.655180 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.655262 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-config-data\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.655307 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0afb05df-ac84-44f8-ac68-0bde2d1eea08-run-httpd\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.757136 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nz7pg\" (UniqueName: \"kubernetes.io/projected/0afb05df-ac84-44f8-ac68-0bde2d1eea08-kube-api-access-nz7pg\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.757799 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-scripts\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.757841 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.757879 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-config-data\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.757929 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0afb05df-ac84-44f8-ac68-0bde2d1eea08-run-httpd\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.757981 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0afb05df-ac84-44f8-ac68-0bde2d1eea08-log-httpd\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.758006 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.759918 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0afb05df-ac84-44f8-ac68-0bde2d1eea08-run-httpd\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.760632 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0afb05df-ac84-44f8-ac68-0bde2d1eea08-log-httpd\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.763843 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.764383 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-config-data\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.764518 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.766481 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-scripts\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.783558 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nz7pg\" (UniqueName: \"kubernetes.io/projected/0afb05df-ac84-44f8-ac68-0bde2d1eea08-kube-api-access-nz7pg\") pod \"ceilometer-0\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " pod="openstack/ceilometer-0" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.813874 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c67e5c73-3338-4eff-b207-79826ea0a546" path="/var/lib/kubelet/pods/c67e5c73-3338-4eff-b207-79826ea0a546/volumes" Jan 28 18:53:52 crc kubenswrapper[4767]: I0128 18:53:52.913234 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:53:53 crc kubenswrapper[4767]: I0128 18:53:53.049148 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:53:53 crc kubenswrapper[4767]: I0128 18:53:53.493531 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:53:54 crc kubenswrapper[4767]: I0128 18:53:54.211464 4767 generic.go:334] "Generic (PLEG): container finished" podID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" containerID="db1ea5918cc4617957b9ae7a3998c0913b15c7a014c6b312bee16c30f2231584" exitCode=0 Jan 28 18:53:54 crc kubenswrapper[4767]: I0128 18:53:54.211569 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"466ee90a-37b7-4b56-a87e-e1175cc31b3d","Type":"ContainerDied","Data":"db1ea5918cc4617957b9ae7a3998c0913b15c7a014c6b312bee16c30f2231584"} Jan 28 18:53:54 crc kubenswrapper[4767]: I0128 18:53:54.213281 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0afb05df-ac84-44f8-ac68-0bde2d1eea08","Type":"ContainerStarted","Data":"67c146a14af9790c2e39d232026ec2fc9aa965a5f5aef1f6b5e0f030cc88fb32"} Jan 28 18:53:54 crc kubenswrapper[4767]: I0128 18:53:54.993924 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.060076 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/466ee90a-37b7-4b56-a87e-e1175cc31b3d-combined-ca-bundle\") pod \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.060506 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szjgc\" (UniqueName: \"kubernetes.io/projected/466ee90a-37b7-4b56-a87e-e1175cc31b3d-kube-api-access-szjgc\") pod \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.060560 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/466ee90a-37b7-4b56-a87e-e1175cc31b3d-logs\") pod \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.060617 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/466ee90a-37b7-4b56-a87e-e1175cc31b3d-config-data\") pod \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\" (UID: \"466ee90a-37b7-4b56-a87e-e1175cc31b3d\") " Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.061252 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/466ee90a-37b7-4b56-a87e-e1175cc31b3d-logs" (OuterVolumeSpecName: "logs") pod "466ee90a-37b7-4b56-a87e-e1175cc31b3d" (UID: "466ee90a-37b7-4b56-a87e-e1175cc31b3d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.067948 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/466ee90a-37b7-4b56-a87e-e1175cc31b3d-kube-api-access-szjgc" (OuterVolumeSpecName: "kube-api-access-szjgc") pod "466ee90a-37b7-4b56-a87e-e1175cc31b3d" (UID: "466ee90a-37b7-4b56-a87e-e1175cc31b3d"). 
InnerVolumeSpecName "kube-api-access-szjgc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.115766 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/466ee90a-37b7-4b56-a87e-e1175cc31b3d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "466ee90a-37b7-4b56-a87e-e1175cc31b3d" (UID: "466ee90a-37b7-4b56-a87e-e1175cc31b3d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.129806 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/466ee90a-37b7-4b56-a87e-e1175cc31b3d-config-data" (OuterVolumeSpecName: "config-data") pod "466ee90a-37b7-4b56-a87e-e1175cc31b3d" (UID: "466ee90a-37b7-4b56-a87e-e1175cc31b3d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.164242 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/466ee90a-37b7-4b56-a87e-e1175cc31b3d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.164295 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szjgc\" (UniqueName: \"kubernetes.io/projected/466ee90a-37b7-4b56-a87e-e1175cc31b3d-kube-api-access-szjgc\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.164329 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/466ee90a-37b7-4b56-a87e-e1175cc31b3d-logs\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.164343 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/466ee90a-37b7-4b56-a87e-e1175cc31b3d-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.239341 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"466ee90a-37b7-4b56-a87e-e1175cc31b3d","Type":"ContainerDied","Data":"594bfdb2a221fc4b456cdf10ff4abc7d5c6f789024c02e8334121c99ca2a4242"} Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.239420 4767 scope.go:117] "RemoveContainer" containerID="db1ea5918cc4617957b9ae7a3998c0913b15c7a014c6b312bee16c30f2231584" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.239516 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.248040 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0afb05df-ac84-44f8-ac68-0bde2d1eea08","Type":"ContainerStarted","Data":"c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa"} Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.296035 4767 scope.go:117] "RemoveContainer" containerID="9f89ddd0611806851b7ac80bd30fc64a7e2dd2eb8a70d8eebd9ef6e8a1c5b89a" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.308006 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.320576 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.345092 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:55 crc kubenswrapper[4767]: E0128 18:53:55.346147 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" containerName="nova-api-log" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.346171 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" containerName="nova-api-log" Jan 28 18:53:55 crc kubenswrapper[4767]: E0128 18:53:55.346319 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" containerName="nova-api-api" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.346367 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" containerName="nova-api-api" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.346708 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" containerName="nova-api-api" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.346730 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" containerName="nova-api-log" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.348051 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.356013 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.356121 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.356317 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.379496 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.471827 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.471891 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/506716a2-1403-4388-b6f1-d0d9c2784dde-logs\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.472042 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fmbbx\" (UniqueName: \"kubernetes.io/projected/506716a2-1403-4388-b6f1-d0d9c2784dde-kube-api-access-fmbbx\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.472429 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-config-data\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.472581 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-internal-tls-certs\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.472613 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-public-tls-certs\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.554664 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.574387 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/506716a2-1403-4388-b6f1-d0d9c2784dde-logs\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.574472 4767 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-fmbbx\" (UniqueName: \"kubernetes.io/projected/506716a2-1403-4388-b6f1-d0d9c2784dde-kube-api-access-fmbbx\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.574562 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-config-data\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.574619 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-internal-tls-certs\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.574643 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-public-tls-certs\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.574709 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.576741 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/506716a2-1403-4388-b6f1-d0d9c2784dde-logs\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.586164 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-public-tls-certs\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.586954 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-config-data\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.605442 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.608133 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-internal-tls-certs\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.620236 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-fmbbx\" (UniqueName: \"kubernetes.io/projected/506716a2-1403-4388-b6f1-d0d9c2784dde-kube-api-access-fmbbx\") pod \"nova-api-0\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") " pod="openstack/nova-api-0" Jan 28 18:53:55 crc kubenswrapper[4767]: I0128 18:53:55.681607 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.286911 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.302841 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0afb05df-ac84-44f8-ac68-0bde2d1eea08","Type":"ContainerStarted","Data":"9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8"} Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.302915 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0afb05df-ac84-44f8-ac68-0bde2d1eea08","Type":"ContainerStarted","Data":"03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460"} Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.413628 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-6rtsh"] Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.415815 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.420869 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.421111 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.437776 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-6rtsh"] Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.506490 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-scripts\") pod \"nova-cell1-cell-mapping-6rtsh\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.506624 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fl2gv\" (UniqueName: \"kubernetes.io/projected/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-kube-api-access-fl2gv\") pod \"nova-cell1-cell-mapping-6rtsh\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.506753 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-6rtsh\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.506814 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-config-data\") pod \"nova-cell1-cell-mapping-6rtsh\" (UID: 
\"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.609176 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-scripts\") pod \"nova-cell1-cell-mapping-6rtsh\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.609298 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fl2gv\" (UniqueName: \"kubernetes.io/projected/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-kube-api-access-fl2gv\") pod \"nova-cell1-cell-mapping-6rtsh\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.609399 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-6rtsh\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.609452 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-config-data\") pod \"nova-cell1-cell-mapping-6rtsh\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.619546 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-scripts\") pod \"nova-cell1-cell-mapping-6rtsh\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.629007 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-6rtsh\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.646849 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-config-data\") pod \"nova-cell1-cell-mapping-6rtsh\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.663130 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fl2gv\" (UniqueName: \"kubernetes.io/projected/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-kube-api-access-fl2gv\") pod \"nova-cell1-cell-mapping-6rtsh\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.804323 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:53:56 crc kubenswrapper[4767]: I0128 18:53:56.809157 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="466ee90a-37b7-4b56-a87e-e1175cc31b3d" path="/var/lib/kubelet/pods/466ee90a-37b7-4b56-a87e-e1175cc31b3d/volumes" Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.319142 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"506716a2-1403-4388-b6f1-d0d9c2784dde","Type":"ContainerStarted","Data":"e6005ccc1d27f945843b2ca25cee9d515528cc03d7506f24e5979da572fa071d"} Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.320143 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"506716a2-1403-4388-b6f1-d0d9c2784dde","Type":"ContainerStarted","Data":"877a9abd2aa860496637699a9dd6fbc9692539289878af2a2f4b9374dbd2fd6a"} Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.320166 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"506716a2-1403-4388-b6f1-d0d9c2784dde","Type":"ContainerStarted","Data":"767a3a5775682770896680004a0aea1c994dae3bd52a7e64aeaabd22894bd0b1"} Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.360462 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4c45g"] Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.363624 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.366244 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.366194788 podStartE2EDuration="2.366194788s" podCreationTimestamp="2026-01-28 18:53:55 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:57.355166682 +0000 UTC m=+1443.319349556" watchObservedRunningTime="2026-01-28 18:53:57.366194788 +0000 UTC m=+1443.330377652" Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.402873 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-6rtsh"] Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.432809 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8cg4x\" (UniqueName: \"kubernetes.io/projected/8753bef4-0577-412e-9bf7-3b311ecda4a1-kube-api-access-8cg4x\") pod \"redhat-operators-4c45g\" (UID: \"8753bef4-0577-412e-9bf7-3b311ecda4a1\") " pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.433009 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8753bef4-0577-412e-9bf7-3b311ecda4a1-catalog-content\") pod \"redhat-operators-4c45g\" (UID: \"8753bef4-0577-412e-9bf7-3b311ecda4a1\") " pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.433070 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8753bef4-0577-412e-9bf7-3b311ecda4a1-utilities\") pod \"redhat-operators-4c45g\" (UID: \"8753bef4-0577-412e-9bf7-3b311ecda4a1\") " pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:53:57 crc 
kubenswrapper[4767]: I0128 18:53:57.514418 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4c45g"] Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.536570 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8753bef4-0577-412e-9bf7-3b311ecda4a1-catalog-content\") pod \"redhat-operators-4c45g\" (UID: \"8753bef4-0577-412e-9bf7-3b311ecda4a1\") " pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.536662 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8753bef4-0577-412e-9bf7-3b311ecda4a1-utilities\") pod \"redhat-operators-4c45g\" (UID: \"8753bef4-0577-412e-9bf7-3b311ecda4a1\") " pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.536880 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8cg4x\" (UniqueName: \"kubernetes.io/projected/8753bef4-0577-412e-9bf7-3b311ecda4a1-kube-api-access-8cg4x\") pod \"redhat-operators-4c45g\" (UID: \"8753bef4-0577-412e-9bf7-3b311ecda4a1\") " pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.537997 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8753bef4-0577-412e-9bf7-3b311ecda4a1-utilities\") pod \"redhat-operators-4c45g\" (UID: \"8753bef4-0577-412e-9bf7-3b311ecda4a1\") " pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.541084 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8753bef4-0577-412e-9bf7-3b311ecda4a1-catalog-content\") pod \"redhat-operators-4c45g\" (UID: \"8753bef4-0577-412e-9bf7-3b311ecda4a1\") " pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.586385 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8cg4x\" (UniqueName: \"kubernetes.io/projected/8753bef4-0577-412e-9bf7-3b311ecda4a1-kube-api-access-8cg4x\") pod \"redhat-operators-4c45g\" (UID: \"8753bef4-0577-412e-9bf7-3b311ecda4a1\") " pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:53:57 crc kubenswrapper[4767]: I0128 18:53:57.861029 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.059487 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.177679 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-8ntj2"] Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.178467 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" podUID="6b257523-2a53-41a6-92a5-a2e7467b4851" containerName="dnsmasq-dns" containerID="cri-o://297ee69c14da0819d35c485005a508940e179ff9de18f00bbed9f0aab99405af" gracePeriod=10 Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.368076 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6rtsh" event={"ID":"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf","Type":"ContainerStarted","Data":"5419b3c6e5bacc7533521c2468285cb4edc97b700ec859eaefa3800ffb08504d"} Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.368144 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6rtsh" event={"ID":"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf","Type":"ContainerStarted","Data":"2e529e68a655d5a86a9a4c4754bced98c6a8d885532ea23fb99ea8f730d39b34"} Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.374394 4767 generic.go:334] "Generic (PLEG): container finished" podID="6b257523-2a53-41a6-92a5-a2e7467b4851" containerID="297ee69c14da0819d35c485005a508940e179ff9de18f00bbed9f0aab99405af" exitCode=0 Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.375767 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" event={"ID":"6b257523-2a53-41a6-92a5-a2e7467b4851","Type":"ContainerDied","Data":"297ee69c14da0819d35c485005a508940e179ff9de18f00bbed9f0aab99405af"} Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.390603 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-6rtsh" podStartSLOduration=2.390579221 podStartE2EDuration="2.390579221s" podCreationTimestamp="2026-01-28 18:53:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:53:58.389779196 +0000 UTC m=+1444.353962080" watchObservedRunningTime="2026-01-28 18:53:58.390579221 +0000 UTC m=+1444.354762095" Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.513146 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4c45g"] Jan 28 18:53:58 crc kubenswrapper[4767]: W0128 18:53:58.542171 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8753bef4_0577_412e_9bf7_3b311ecda4a1.slice/crio-702433f8e5fe0c1b017acf6da47017f47b9fd026bfd4f697665aa4a85631a3d1 WatchSource:0}: Error finding container 702433f8e5fe0c1b017acf6da47017f47b9fd026bfd4f697665aa4a85631a3d1: Status 404 returned error can't find the container with id 702433f8e5fe0c1b017acf6da47017f47b9fd026bfd4f697665aa4a85631a3d1 Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.845036 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.974551 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wc9cv\" (UniqueName: \"kubernetes.io/projected/6b257523-2a53-41a6-92a5-a2e7467b4851-kube-api-access-wc9cv\") pod \"6b257523-2a53-41a6-92a5-a2e7467b4851\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.974618 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-config\") pod \"6b257523-2a53-41a6-92a5-a2e7467b4851\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.974667 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-ovsdbserver-sb\") pod \"6b257523-2a53-41a6-92a5-a2e7467b4851\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.974696 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-dns-swift-storage-0\") pod \"6b257523-2a53-41a6-92a5-a2e7467b4851\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.974786 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-dns-svc\") pod \"6b257523-2a53-41a6-92a5-a2e7467b4851\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " Jan 28 18:53:58 crc kubenswrapper[4767]: I0128 18:53:58.974819 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-ovsdbserver-nb\") pod \"6b257523-2a53-41a6-92a5-a2e7467b4851\" (UID: \"6b257523-2a53-41a6-92a5-a2e7467b4851\") " Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.025715 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b257523-2a53-41a6-92a5-a2e7467b4851-kube-api-access-wc9cv" (OuterVolumeSpecName: "kube-api-access-wc9cv") pod "6b257523-2a53-41a6-92a5-a2e7467b4851" (UID: "6b257523-2a53-41a6-92a5-a2e7467b4851"). InnerVolumeSpecName "kube-api-access-wc9cv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.086428 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wc9cv\" (UniqueName: \"kubernetes.io/projected/6b257523-2a53-41a6-92a5-a2e7467b4851-kube-api-access-wc9cv\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.088616 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "6b257523-2a53-41a6-92a5-a2e7467b4851" (UID: "6b257523-2a53-41a6-92a5-a2e7467b4851"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.149991 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-config" (OuterVolumeSpecName: "config") pod "6b257523-2a53-41a6-92a5-a2e7467b4851" (UID: "6b257523-2a53-41a6-92a5-a2e7467b4851"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.176905 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "6b257523-2a53-41a6-92a5-a2e7467b4851" (UID: "6b257523-2a53-41a6-92a5-a2e7467b4851"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.186557 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "6b257523-2a53-41a6-92a5-a2e7467b4851" (UID: "6b257523-2a53-41a6-92a5-a2e7467b4851"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.200980 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.201012 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.201023 4767 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.201033 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.213986 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6b257523-2a53-41a6-92a5-a2e7467b4851" (UID: "6b257523-2a53-41a6-92a5-a2e7467b4851"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.303460 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b257523-2a53-41a6-92a5-a2e7467b4851-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.397909 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0afb05df-ac84-44f8-ac68-0bde2d1eea08","Type":"ContainerStarted","Data":"d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a"} Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.401055 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" event={"ID":"6b257523-2a53-41a6-92a5-a2e7467b4851","Type":"ContainerDied","Data":"0f187c343e67431bd8d5d750fe90aa1c8ec742ed6b2a67d549c1a0110f5c9f6f"} Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.401597 4767 scope.go:117] "RemoveContainer" containerID="297ee69c14da0819d35c485005a508940e179ff9de18f00bbed9f0aab99405af" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.401864 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-568d7fd7cf-8ntj2" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.410782 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4c45g" event={"ID":"8753bef4-0577-412e-9bf7-3b311ecda4a1","Type":"ContainerStarted","Data":"d9fa5f09c2568ccc74736575e8bcf850ed55a914fad218414661cecf0fd1aa07"} Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.410947 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4c45g" event={"ID":"8753bef4-0577-412e-9bf7-3b311ecda4a1","Type":"ContainerStarted","Data":"702433f8e5fe0c1b017acf6da47017f47b9fd026bfd4f697665aa4a85631a3d1"} Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.453029 4767 scope.go:117] "RemoveContainer" containerID="aead0fa7509e6af01dd88ac772fb037f232d2e3963a51413da8088a42415dc2a" Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.495121 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-8ntj2"] Jan 28 18:53:59 crc kubenswrapper[4767]: I0128 18:53:59.605936 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-568d7fd7cf-8ntj2"] Jan 28 18:54:00 crc kubenswrapper[4767]: I0128 18:54:00.427282 4767 generic.go:334] "Generic (PLEG): container finished" podID="8753bef4-0577-412e-9bf7-3b311ecda4a1" containerID="d9fa5f09c2568ccc74736575e8bcf850ed55a914fad218414661cecf0fd1aa07" exitCode=0 Jan 28 18:54:00 crc kubenswrapper[4767]: I0128 18:54:00.427877 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4c45g" event={"ID":"8753bef4-0577-412e-9bf7-3b311ecda4a1","Type":"ContainerDied","Data":"d9fa5f09c2568ccc74736575e8bcf850ed55a914fad218414661cecf0fd1aa07"} Jan 28 18:54:00 crc kubenswrapper[4767]: I0128 18:54:00.434639 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="ceilometer-central-agent" containerID="cri-o://c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa" gracePeriod=30 Jan 28 18:54:00 crc kubenswrapper[4767]: I0128 18:54:00.435153 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 18:54:00 crc 
kubenswrapper[4767]: I0128 18:54:00.435228 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="proxy-httpd" containerID="cri-o://d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a" gracePeriod=30 Jan 28 18:54:00 crc kubenswrapper[4767]: I0128 18:54:00.435277 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="sg-core" containerID="cri-o://9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8" gracePeriod=30 Jan 28 18:54:00 crc kubenswrapper[4767]: I0128 18:54:00.435315 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="ceilometer-notification-agent" containerID="cri-o://03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460" gracePeriod=30 Jan 28 18:54:00 crc kubenswrapper[4767]: I0128 18:54:00.508495 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.612013998 podStartE2EDuration="8.508470206s" podCreationTimestamp="2026-01-28 18:53:52 +0000 UTC" firstStartedPulling="2026-01-28 18:53:53.493944828 +0000 UTC m=+1439.458127702" lastFinishedPulling="2026-01-28 18:53:58.390401036 +0000 UTC m=+1444.354583910" observedRunningTime="2026-01-28 18:54:00.500180576 +0000 UTC m=+1446.464363450" watchObservedRunningTime="2026-01-28 18:54:00.508470206 +0000 UTC m=+1446.472653080" Jan 28 18:54:00 crc kubenswrapper[4767]: I0128 18:54:00.809086 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b257523-2a53-41a6-92a5-a2e7467b4851" path="/var/lib/kubelet/pods/6b257523-2a53-41a6-92a5-a2e7467b4851/volumes" Jan 28 18:54:01 crc kubenswrapper[4767]: I0128 18:54:01.449819 4767 generic.go:334] "Generic (PLEG): container finished" podID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerID="d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a" exitCode=0 Jan 28 18:54:01 crc kubenswrapper[4767]: I0128 18:54:01.450989 4767 generic.go:334] "Generic (PLEG): container finished" podID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerID="9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8" exitCode=2 Jan 28 18:54:01 crc kubenswrapper[4767]: I0128 18:54:01.451081 4767 generic.go:334] "Generic (PLEG): container finished" podID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerID="03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460" exitCode=0 Jan 28 18:54:01 crc kubenswrapper[4767]: I0128 18:54:01.449944 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0afb05df-ac84-44f8-ac68-0bde2d1eea08","Type":"ContainerDied","Data":"d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a"} Jan 28 18:54:01 crc kubenswrapper[4767]: I0128 18:54:01.451269 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0afb05df-ac84-44f8-ac68-0bde2d1eea08","Type":"ContainerDied","Data":"9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8"} Jan 28 18:54:01 crc kubenswrapper[4767]: I0128 18:54:01.451312 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0afb05df-ac84-44f8-ac68-0bde2d1eea08","Type":"ContainerDied","Data":"03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460"} Jan 28 18:54:03 
crc kubenswrapper[4767]: I0128 18:54:03.513389 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4c45g" event={"ID":"8753bef4-0577-412e-9bf7-3b311ecda4a1","Type":"ContainerStarted","Data":"a216f76d1357e21709b5370c9480602fce0edfd344219ae867b3614fcbca5ecf"} Jan 28 18:54:04 crc kubenswrapper[4767]: I0128 18:54:04.530095 4767 generic.go:334] "Generic (PLEG): container finished" podID="8753bef4-0577-412e-9bf7-3b311ecda4a1" containerID="a216f76d1357e21709b5370c9480602fce0edfd344219ae867b3614fcbca5ecf" exitCode=0 Jan 28 18:54:04 crc kubenswrapper[4767]: I0128 18:54:04.530173 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4c45g" event={"ID":"8753bef4-0577-412e-9bf7-3b311ecda4a1","Type":"ContainerDied","Data":"a216f76d1357e21709b5370c9480602fce0edfd344219ae867b3614fcbca5ecf"} Jan 28 18:54:05 crc kubenswrapper[4767]: I0128 18:54:05.683287 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 18:54:05 crc kubenswrapper[4767]: I0128 18:54:05.684316 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 28 18:54:06 crc kubenswrapper[4767]: I0128 18:54:06.732418 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="506716a2-1403-4388-b6f1-d0d9c2784dde" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.212:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:54:06 crc kubenswrapper[4767]: I0128 18:54:06.732406 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="506716a2-1403-4388-b6f1-d0d9c2784dde" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.212:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 28 18:54:09 crc kubenswrapper[4767]: I0128 18:54:09.593930 4767 generic.go:334] "Generic (PLEG): container finished" podID="9e74c600-b1ab-45a5-b4ea-171ceb03d9bf" containerID="5419b3c6e5bacc7533521c2468285cb4edc97b700ec859eaefa3800ffb08504d" exitCode=0 Jan 28 18:54:09 crc kubenswrapper[4767]: I0128 18:54:09.594051 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6rtsh" event={"ID":"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf","Type":"ContainerDied","Data":"5419b3c6e5bacc7533521c2468285cb4edc97b700ec859eaefa3800ffb08504d"} Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.107702 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.212429 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-config-data\") pod \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.212638 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fl2gv\" (UniqueName: \"kubernetes.io/projected/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-kube-api-access-fl2gv\") pod \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.212693 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-scripts\") pod \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.212733 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-combined-ca-bundle\") pod \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\" (UID: \"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf\") " Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.218828 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-scripts" (OuterVolumeSpecName: "scripts") pod "9e74c600-b1ab-45a5-b4ea-171ceb03d9bf" (UID: "9e74c600-b1ab-45a5-b4ea-171ceb03d9bf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.232199 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-kube-api-access-fl2gv" (OuterVolumeSpecName: "kube-api-access-fl2gv") pod "9e74c600-b1ab-45a5-b4ea-171ceb03d9bf" (UID: "9e74c600-b1ab-45a5-b4ea-171ceb03d9bf"). InnerVolumeSpecName "kube-api-access-fl2gv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.251963 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-config-data" (OuterVolumeSpecName: "config-data") pod "9e74c600-b1ab-45a5-b4ea-171ceb03d9bf" (UID: "9e74c600-b1ab-45a5-b4ea-171ceb03d9bf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.254018 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9e74c600-b1ab-45a5-b4ea-171ceb03d9bf" (UID: "9e74c600-b1ab-45a5-b4ea-171ceb03d9bf"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.315664 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.315721 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fl2gv\" (UniqueName: \"kubernetes.io/projected/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-kube-api-access-fl2gv\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.315731 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.315740 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.625092 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-6rtsh" event={"ID":"9e74c600-b1ab-45a5-b4ea-171ceb03d9bf","Type":"ContainerDied","Data":"2e529e68a655d5a86a9a4c4754bced98c6a8d885532ea23fb99ea8f730d39b34"} Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.625854 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2e529e68a655d5a86a9a4c4754bced98c6a8d885532ea23fb99ea8f730d39b34" Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.625175 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-6rtsh" Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.881092 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.881476 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerName="nova-metadata-log" containerID="cri-o://24a4e4a7af49a58a09c180060abf310a5c5b4fd2c88e758b86e82d4128987380" gracePeriod=30 Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.881619 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerName="nova-metadata-metadata" containerID="cri-o://cce7ef92922cb7e00c3f9bafec2029d583353b8ad4914634ffaa1060b755870a" gracePeriod=30 Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.903537 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.903878 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="506716a2-1403-4388-b6f1-d0d9c2784dde" containerName="nova-api-log" containerID="cri-o://877a9abd2aa860496637699a9dd6fbc9692539289878af2a2f4b9374dbd2fd6a" gracePeriod=30 Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.904431 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="506716a2-1403-4388-b6f1-d0d9c2784dde" containerName="nova-api-api" containerID="cri-o://e6005ccc1d27f945843b2ca25cee9d515528cc03d7506f24e5979da572fa071d" gracePeriod=30 Jan 28 18:54:11 crc 
kubenswrapper[4767]: I0128 18:54:11.921022 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:54:11 crc kubenswrapper[4767]: I0128 18:54:11.921370 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd" containerName="nova-scheduler-scheduler" containerID="cri-o://1aed7cdbef53920b80c271f482683aa11afd0e45efb68b188b76030be60af7e1" gracePeriod=30 Jan 28 18:54:12 crc kubenswrapper[4767]: I0128 18:54:12.639911 4767 generic.go:334] "Generic (PLEG): container finished" podID="506716a2-1403-4388-b6f1-d0d9c2784dde" containerID="877a9abd2aa860496637699a9dd6fbc9692539289878af2a2f4b9374dbd2fd6a" exitCode=143 Jan 28 18:54:12 crc kubenswrapper[4767]: I0128 18:54:12.640120 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"506716a2-1403-4388-b6f1-d0d9c2784dde","Type":"ContainerDied","Data":"877a9abd2aa860496637699a9dd6fbc9692539289878af2a2f4b9374dbd2fd6a"} Jan 28 18:54:12 crc kubenswrapper[4767]: I0128 18:54:12.642767 4767 generic.go:334] "Generic (PLEG): container finished" podID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerID="24a4e4a7af49a58a09c180060abf310a5c5b4fd2c88e758b86e82d4128987380" exitCode=143 Jan 28 18:54:12 crc kubenswrapper[4767]: I0128 18:54:12.642811 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c61d2b3-8e23-458b-ab4c-2e0097f3496e","Type":"ContainerDied","Data":"24a4e4a7af49a58a09c180060abf310a5c5b4fd2c88e758b86e82d4128987380"} Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.069823 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": dial tcp 10.217.0.204:8775: connect: connection refused" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.073554 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": dial tcp 10.217.0.204:8775: connect: connection refused" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.468491 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.527090 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0afb05df-ac84-44f8-ac68-0bde2d1eea08-run-httpd\") pod \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.527161 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-config-data\") pod \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.527195 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-sg-core-conf-yaml\") pod \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.527262 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0afb05df-ac84-44f8-ac68-0bde2d1eea08-log-httpd\") pod \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.527333 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-combined-ca-bundle\") pod \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.527387 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nz7pg\" (UniqueName: \"kubernetes.io/projected/0afb05df-ac84-44f8-ac68-0bde2d1eea08-kube-api-access-nz7pg\") pod \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.527453 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-scripts\") pod \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\" (UID: \"0afb05df-ac84-44f8-ac68-0bde2d1eea08\") " Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.529952 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0afb05df-ac84-44f8-ac68-0bde2d1eea08-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0afb05df-ac84-44f8-ac68-0bde2d1eea08" (UID: "0afb05df-ac84-44f8-ac68-0bde2d1eea08"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.530502 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0afb05df-ac84-44f8-ac68-0bde2d1eea08-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0afb05df-ac84-44f8-ac68-0bde2d1eea08" (UID: "0afb05df-ac84-44f8-ac68-0bde2d1eea08"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.544961 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0afb05df-ac84-44f8-ac68-0bde2d1eea08-kube-api-access-nz7pg" (OuterVolumeSpecName: "kube-api-access-nz7pg") pod "0afb05df-ac84-44f8-ac68-0bde2d1eea08" (UID: "0afb05df-ac84-44f8-ac68-0bde2d1eea08"). InnerVolumeSpecName "kube-api-access-nz7pg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.545402 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-scripts" (OuterVolumeSpecName: "scripts") pod "0afb05df-ac84-44f8-ac68-0bde2d1eea08" (UID: "0afb05df-ac84-44f8-ac68-0bde2d1eea08"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.590498 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0afb05df-ac84-44f8-ac68-0bde2d1eea08" (UID: "0afb05df-ac84-44f8-ac68-0bde2d1eea08"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.633039 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nz7pg\" (UniqueName: \"kubernetes.io/projected/0afb05df-ac84-44f8-ac68-0bde2d1eea08-kube-api-access-nz7pg\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.633458 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.633476 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0afb05df-ac84-44f8-ac68-0bde2d1eea08-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.633486 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.633496 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0afb05df-ac84-44f8-ac68-0bde2d1eea08-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.686135 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0afb05df-ac84-44f8-ac68-0bde2d1eea08" (UID: "0afb05df-ac84-44f8-ac68-0bde2d1eea08"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.688937 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4c45g" event={"ID":"8753bef4-0577-412e-9bf7-3b311ecda4a1","Type":"ContainerStarted","Data":"790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5"} Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.693991 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.694061 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0afb05df-ac84-44f8-ac68-0bde2d1eea08","Type":"ContainerDied","Data":"c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa"} Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.693821 4767 generic.go:334] "Generic (PLEG): container finished" podID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerID="c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa" exitCode=0 Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.694148 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0afb05df-ac84-44f8-ac68-0bde2d1eea08","Type":"ContainerDied","Data":"67c146a14af9790c2e39d232026ec2fc9aa965a5f5aef1f6b5e0f030cc88fb32"} Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.694178 4767 scope.go:117] "RemoveContainer" containerID="d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.699013 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c61d2b3-8e23-458b-ab4c-2e0097f3496e","Type":"ContainerDied","Data":"cce7ef92922cb7e00c3f9bafec2029d583353b8ad4914634ffaa1060b755870a"} Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.699093 4767 generic.go:334] "Generic (PLEG): container finished" podID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerID="cce7ef92922cb7e00c3f9bafec2029d583353b8ad4914634ffaa1060b755870a" exitCode=0 Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.699233 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"2c61d2b3-8e23-458b-ab4c-2e0097f3496e","Type":"ContainerDied","Data":"baa5ea9bfcb72209c00e6e405701ced137d67648256d7621c7485060c19f3021"} Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.699267 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="baa5ea9bfcb72209c00e6e405701ced137d67648256d7621c7485060c19f3021" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.738624 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.770132 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4c45g" podStartSLOduration=4.515982169 podStartE2EDuration="18.770097172s" podCreationTimestamp="2026-01-28 18:53:57 +0000 UTC" firstStartedPulling="2026-01-28 18:54:00.430670914 +0000 UTC m=+1446.394853788" lastFinishedPulling="2026-01-28 18:54:14.684785917 +0000 UTC m=+1460.648968791" observedRunningTime="2026-01-28 18:54:15.712996689 +0000 UTC m=+1461.677179563" watchObservedRunningTime="2026-01-28 18:54:15.770097172 +0000 UTC m=+1461.734280046" 
Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.776760 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-config-data" (OuterVolumeSpecName: "config-data") pod "0afb05df-ac84-44f8-ac68-0bde2d1eea08" (UID: "0afb05df-ac84-44f8-ac68-0bde2d1eea08"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.844920 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.846580 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0afb05df-ac84-44f8-ac68-0bde2d1eea08-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.855843 4767 scope.go:117] "RemoveContainer" containerID="9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8"
Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.925321 4767 scope.go:117] "RemoveContainer" containerID="03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460"
Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.948571 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cssfq\" (UniqueName: \"kubernetes.io/projected/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-kube-api-access-cssfq\") pod \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") "
Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.949137 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-logs\") pod \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") "
Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.949269 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-config-data\") pod \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") "
Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.949461 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-nova-metadata-tls-certs\") pod \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") "
Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.949634 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-combined-ca-bundle\") pod \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\" (UID: \"2c61d2b3-8e23-458b-ab4c-2e0097f3496e\") "
Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.958678 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-kube-api-access-cssfq" (OuterVolumeSpecName: "kube-api-access-cssfq") pod "2c61d2b3-8e23-458b-ab4c-2e0097f3496e" (UID: "2c61d2b3-8e23-458b-ab4c-2e0097f3496e"). InnerVolumeSpecName "kube-api-access-cssfq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:54:15 crc kubenswrapper[4767]: I0128 18:54:15.961728 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-logs" (OuterVolumeSpecName: "logs") pod "2c61d2b3-8e23-458b-ab4c-2e0097f3496e" (UID: "2c61d2b3-8e23-458b-ab4c-2e0097f3496e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.005814 4767 scope.go:117] "RemoveContainer" containerID="c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.008737 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-config-data" (OuterVolumeSpecName: "config-data") pod "2c61d2b3-8e23-458b-ab4c-2e0097f3496e" (UID: "2c61d2b3-8e23-458b-ab4c-2e0097f3496e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.010422 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2c61d2b3-8e23-458b-ab4c-2e0097f3496e" (UID: "2c61d2b3-8e23-458b-ab4c-2e0097f3496e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.049788 4767 scope.go:117] "RemoveContainer" containerID="d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.050032 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "2c61d2b3-8e23-458b-ab4c-2e0097f3496e" (UID: "2c61d2b3-8e23-458b-ab4c-2e0097f3496e"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.056170 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a\": container with ID starting with d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a not found: ID does not exist" containerID="d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.056267 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a"} err="failed to get container status \"d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a\": rpc error: code = NotFound desc = could not find container \"d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a\": container with ID starting with d66fd2f499d51099683c97d2baf95fb337b68a7b4be26b701072b48272ba883a not found: ID does not exist"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.056318 4767 scope.go:117] "RemoveContainer" containerID="9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.058318 4767 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.058446 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.058520 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cssfq\" (UniqueName: \"kubernetes.io/projected/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-kube-api-access-cssfq\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.058586 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-logs\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.058677 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2c61d2b3-8e23-458b-ab4c-2e0097f3496e-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.060570 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8\": container with ID starting with 9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8 not found: ID does not exist" containerID="9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.060666 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8"} err="failed to get container status \"9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8\": rpc error: code = NotFound desc = could not find container \"9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8\": container with ID starting with 9403eccbf8ca9e905b455e2d432e4516a3847cf6babe8e4d174465671b915cd8 not found: ID does not exist"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.060721 4767 scope.go:117] "RemoveContainer" containerID="03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460"
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.061423 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460\": container with ID starting with 03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460 not found: ID does not exist" containerID="03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.061467 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460"} err="failed to get container status \"03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460\": rpc error: code = NotFound desc = could not find container \"03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460\": container with ID starting with 03f14f14ecd4f2fde09c9cb0fbc50ff4fd0c2392603ee01534dbb696022a1460 not found: ID does not exist"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.061492 4767 scope.go:117] "RemoveContainer" containerID="c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa"
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.063272 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa\": container with ID starting with c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa not found: ID does not exist" containerID="c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.063312 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa"} err="failed to get container status \"c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa\": rpc error: code = NotFound desc = could not find container \"c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa\": container with ID starting with c7042fad9a21e7d74f6f7a0f7f748a5fa1aa653c6bfcc2a79b5d9f9dce86d2aa not found: ID does not exist"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.064899 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.077676 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.105479 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.106051 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="sg-core"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106069 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="sg-core"
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.106081 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerName="nova-metadata-log"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106088 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerName="nova-metadata-log"
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.106104 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="ceilometer-central-agent"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106114 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="ceilometer-central-agent"
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.106129 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerName="nova-metadata-metadata"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106135 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerName="nova-metadata-metadata"
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.106147 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="proxy-httpd"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106153 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="proxy-httpd"
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.106167 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e74c600-b1ab-45a5-b4ea-171ceb03d9bf" containerName="nova-manage"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106172 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e74c600-b1ab-45a5-b4ea-171ceb03d9bf" containerName="nova-manage"
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.106181 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b257523-2a53-41a6-92a5-a2e7467b4851" containerName="dnsmasq-dns"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106186 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b257523-2a53-41a6-92a5-a2e7467b4851" containerName="dnsmasq-dns"
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.106197 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b257523-2a53-41a6-92a5-a2e7467b4851" containerName="init"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106221 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b257523-2a53-41a6-92a5-a2e7467b4851" containerName="init"
Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.106236 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="ceilometer-notification-agent"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106245 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="ceilometer-notification-agent"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106433 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerName="nova-metadata-log"
Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106448 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" containerName="nova-metadata-metadata"
containerName="nova-metadata-metadata" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106459 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b257523-2a53-41a6-92a5-a2e7467b4851" containerName="dnsmasq-dns" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106469 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="sg-core" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106479 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="ceilometer-central-agent" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106493 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e74c600-b1ab-45a5-b4ea-171ceb03d9bf" containerName="nova-manage" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106507 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="ceilometer-notification-agent" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.106516 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" containerName="proxy-httpd" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.108903 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.112159 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.113037 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.159062 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.160971 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-scripts\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.161108 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-config-data\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.161154 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0acd7c9f-7969-45e3-834c-65f77c66f7df-log-httpd\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.161227 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcf4j\" (UniqueName: \"kubernetes.io/projected/0acd7c9f-7969-45e3-834c-65f77c66f7df-kube-api-access-gcf4j\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.161281 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0acd7c9f-7969-45e3-834c-65f77c66f7df-run-httpd\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.161361 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.161435 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.263080 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-scripts\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.264421 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-config-data\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.264469 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0acd7c9f-7969-45e3-834c-65f77c66f7df-log-httpd\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.264505 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcf4j\" (UniqueName: \"kubernetes.io/projected/0acd7c9f-7969-45e3-834c-65f77c66f7df-kube-api-access-gcf4j\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.264543 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0acd7c9f-7969-45e3-834c-65f77c66f7df-run-httpd\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.264607 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.264647 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.265440 4767 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0acd7c9f-7969-45e3-834c-65f77c66f7df-run-httpd\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.265508 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0acd7c9f-7969-45e3-834c-65f77c66f7df-log-httpd\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.269820 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.269885 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-scripts\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.270319 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-config-data\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.271514 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.292028 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcf4j\" (UniqueName: \"kubernetes.io/projected/0acd7c9f-7969-45e3-834c-65f77c66f7df-kube-api-access-gcf4j\") pod \"ceilometer-0\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.434846 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.671775 4767 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1aed7cdbef53920b80c271f482683aa11afd0e45efb68b188b76030be60af7e1 is running failed: container process not found" containerID="1aed7cdbef53920b80c271f482683aa11afd0e45efb68b188b76030be60af7e1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.675686 4767 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1aed7cdbef53920b80c271f482683aa11afd0e45efb68b188b76030be60af7e1 is running failed: container process not found" containerID="1aed7cdbef53920b80c271f482683aa11afd0e45efb68b188b76030be60af7e1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.681699 4767 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1aed7cdbef53920b80c271f482683aa11afd0e45efb68b188b76030be60af7e1 is running failed: container process not found" containerID="1aed7cdbef53920b80c271f482683aa11afd0e45efb68b188b76030be60af7e1" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 28 18:54:16 crc kubenswrapper[4767]: E0128 18:54:16.681769 4767 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 1aed7cdbef53920b80c271f482683aa11afd0e45efb68b188b76030be60af7e1 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd" containerName="nova-scheduler-scheduler" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.734233 4767 generic.go:334] "Generic (PLEG): container finished" podID="0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd" containerID="1aed7cdbef53920b80c271f482683aa11afd0e45efb68b188b76030be60af7e1" exitCode=0 Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.734328 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd","Type":"ContainerDied","Data":"1aed7cdbef53920b80c271f482683aa11afd0e45efb68b188b76030be60af7e1"} Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.736906 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.847287 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0afb05df-ac84-44f8-ac68-0bde2d1eea08" path="/var/lib/kubelet/pods/0afb05df-ac84-44f8-ac68-0bde2d1eea08/volumes" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.848717 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.850708 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.866881 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.869374 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.871774 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.877345 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.884353 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4z62t\" (UniqueName: \"kubernetes.io/projected/532091ea-ba10-45b3-8843-bf1582e4e30e-kube-api-access-4z62t\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.884398 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/532091ea-ba10-45b3-8843-bf1582e4e30e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.884470 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/532091ea-ba10-45b3-8843-bf1582e4e30e-logs\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.884519 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/532091ea-ba10-45b3-8843-bf1582e4e30e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.884580 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/532091ea-ba10-45b3-8843-bf1582e4e30e-config-data\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.887347 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.986548 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4z62t\" (UniqueName: \"kubernetes.io/projected/532091ea-ba10-45b3-8843-bf1582e4e30e-kube-api-access-4z62t\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.986597 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/532091ea-ba10-45b3-8843-bf1582e4e30e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.986649 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/532091ea-ba10-45b3-8843-bf1582e4e30e-logs\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " 
pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.986699 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/532091ea-ba10-45b3-8843-bf1582e4e30e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.986765 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/532091ea-ba10-45b3-8843-bf1582e4e30e-config-data\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.989785 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/532091ea-ba10-45b3-8843-bf1582e4e30e-logs\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.995406 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/532091ea-ba10-45b3-8843-bf1582e4e30e-config-data\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:16 crc kubenswrapper[4767]: I0128 18:54:16.996250 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/532091ea-ba10-45b3-8843-bf1582e4e30e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.002297 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/532091ea-ba10-45b3-8843-bf1582e4e30e-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.008158 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4z62t\" (UniqueName: \"kubernetes.io/projected/532091ea-ba10-45b3-8843-bf1582e4e30e-kube-api-access-4z62t\") pod \"nova-metadata-0\" (UID: \"532091ea-ba10-45b3-8843-bf1582e4e30e\") " pod="openstack/nova-metadata-0" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.132911 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.143278 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.190410 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9kxt5\" (UniqueName: \"kubernetes.io/projected/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-kube-api-access-9kxt5\") pod \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\" (UID: \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\") " Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.190561 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-combined-ca-bundle\") pod \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\" (UID: \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\") " Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.190858 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-config-data\") pod \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\" (UID: \"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd\") " Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.199667 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-kube-api-access-9kxt5" (OuterVolumeSpecName: "kube-api-access-9kxt5") pod "0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd" (UID: "0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd"). InnerVolumeSpecName "kube-api-access-9kxt5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.249966 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.275185 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd" (UID: "0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.283468 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-config-data" (OuterVolumeSpecName: "config-data") pod "0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd" (UID: "0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.303418 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9kxt5\" (UniqueName: \"kubernetes.io/projected/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-kube-api-access-9kxt5\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.304955 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.305034 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.806790 4767 generic.go:334] "Generic (PLEG): container finished" podID="506716a2-1403-4388-b6f1-d0d9c2784dde" containerID="e6005ccc1d27f945843b2ca25cee9d515528cc03d7506f24e5979da572fa071d" exitCode=0 Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.806959 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"506716a2-1403-4388-b6f1-d0d9c2784dde","Type":"ContainerDied","Data":"e6005ccc1d27f945843b2ca25cee9d515528cc03d7506f24e5979da572fa071d"} Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.812948 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd","Type":"ContainerDied","Data":"c6a87bb4082cb29ae36dea13312afb207631f59492ea471cb174b87603e5d730"} Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.813022 4767 scope.go:117] "RemoveContainer" containerID="1aed7cdbef53920b80c271f482683aa11afd0e45efb68b188b76030be60af7e1" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.813153 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.822880 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0acd7c9f-7969-45e3-834c-65f77c66f7df","Type":"ContainerStarted","Data":"55c4da1a6ae9320a77309b1ebfa0a83008b30f3327de814fd4e19d5180a06e27"} Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.838574 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.861268 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.861333 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4c45g" Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.912574 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:54:17 crc kubenswrapper[4767]: I0128 18:54:17.925716 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.020508 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:54:18 crc kubenswrapper[4767]: E0128 18:54:18.024167 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd" containerName="nova-scheduler-scheduler" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.032398 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd" containerName="nova-scheduler-scheduler" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.042125 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd" containerName="nova-scheduler-scheduler" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.043466 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.055234 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.072231 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.151414 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee1ce6f4-07db-4542-bdf2-49fc980801eb-config-data\") pod \"nova-scheduler-0\" (UID: \"ee1ce6f4-07db-4542-bdf2-49fc980801eb\") " pod="openstack/nova-scheduler-0" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.151561 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee1ce6f4-07db-4542-bdf2-49fc980801eb-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ee1ce6f4-07db-4542-bdf2-49fc980801eb\") " pod="openstack/nova-scheduler-0" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.151597 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-99qlj\" (UniqueName: \"kubernetes.io/projected/ee1ce6f4-07db-4542-bdf2-49fc980801eb-kube-api-access-99qlj\") pod \"nova-scheduler-0\" (UID: \"ee1ce6f4-07db-4542-bdf2-49fc980801eb\") " pod="openstack/nova-scheduler-0" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.204448 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.256024 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee1ce6f4-07db-4542-bdf2-49fc980801eb-config-data\") pod \"nova-scheduler-0\" (UID: \"ee1ce6f4-07db-4542-bdf2-49fc980801eb\") " pod="openstack/nova-scheduler-0" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.256334 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee1ce6f4-07db-4542-bdf2-49fc980801eb-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ee1ce6f4-07db-4542-bdf2-49fc980801eb\") " pod="openstack/nova-scheduler-0" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.256395 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-99qlj\" (UniqueName: \"kubernetes.io/projected/ee1ce6f4-07db-4542-bdf2-49fc980801eb-kube-api-access-99qlj\") pod \"nova-scheduler-0\" (UID: \"ee1ce6f4-07db-4542-bdf2-49fc980801eb\") " pod="openstack/nova-scheduler-0" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.262559 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee1ce6f4-07db-4542-bdf2-49fc980801eb-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"ee1ce6f4-07db-4542-bdf2-49fc980801eb\") " pod="openstack/nova-scheduler-0" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.280054 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee1ce6f4-07db-4542-bdf2-49fc980801eb-config-data\") pod \"nova-scheduler-0\" (UID: \"ee1ce6f4-07db-4542-bdf2-49fc980801eb\") " pod="openstack/nova-scheduler-0" Jan 28 18:54:18 crc 
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.281263 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-99qlj\" (UniqueName: \"kubernetes.io/projected/ee1ce6f4-07db-4542-bdf2-49fc980801eb-kube-api-access-99qlj\") pod \"nova-scheduler-0\" (UID: \"ee1ce6f4-07db-4542-bdf2-49fc980801eb\") " pod="openstack/nova-scheduler-0"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.358416 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fmbbx\" (UniqueName: \"kubernetes.io/projected/506716a2-1403-4388-b6f1-d0d9c2784dde-kube-api-access-fmbbx\") pod \"506716a2-1403-4388-b6f1-d0d9c2784dde\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") "
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.358576 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-internal-tls-certs\") pod \"506716a2-1403-4388-b6f1-d0d9c2784dde\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") "
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.358623 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-public-tls-certs\") pod \"506716a2-1403-4388-b6f1-d0d9c2784dde\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") "
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.358671 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-combined-ca-bundle\") pod \"506716a2-1403-4388-b6f1-d0d9c2784dde\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") "
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.358699 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-config-data\") pod \"506716a2-1403-4388-b6f1-d0d9c2784dde\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") "
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.358722 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/506716a2-1403-4388-b6f1-d0d9c2784dde-logs\") pod \"506716a2-1403-4388-b6f1-d0d9c2784dde\" (UID: \"506716a2-1403-4388-b6f1-d0d9c2784dde\") "
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.359740 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/506716a2-1403-4388-b6f1-d0d9c2784dde-logs" (OuterVolumeSpecName: "logs") pod "506716a2-1403-4388-b6f1-d0d9c2784dde" (UID: "506716a2-1403-4388-b6f1-d0d9c2784dde"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.389485 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/506716a2-1403-4388-b6f1-d0d9c2784dde-kube-api-access-fmbbx" (OuterVolumeSpecName: "kube-api-access-fmbbx") pod "506716a2-1403-4388-b6f1-d0d9c2784dde" (UID: "506716a2-1403-4388-b6f1-d0d9c2784dde"). InnerVolumeSpecName "kube-api-access-fmbbx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.422706 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.449283 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "506716a2-1403-4388-b6f1-d0d9c2784dde" (UID: "506716a2-1403-4388-b6f1-d0d9c2784dde"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.461747 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fmbbx\" (UniqueName: \"kubernetes.io/projected/506716a2-1403-4388-b6f1-d0d9c2784dde-kube-api-access-fmbbx\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.461797 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.461814 4767 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/506716a2-1403-4388-b6f1-d0d9c2784dde-logs\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.497385 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-config-data" (OuterVolumeSpecName: "config-data") pod "506716a2-1403-4388-b6f1-d0d9c2784dde" (UID: "506716a2-1403-4388-b6f1-d0d9c2784dde"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.504273 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "506716a2-1403-4388-b6f1-d0d9c2784dde" (UID: "506716a2-1403-4388-b6f1-d0d9c2784dde"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.532152 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "506716a2-1403-4388-b6f1-d0d9c2784dde" (UID: "506716a2-1403-4388-b6f1-d0d9c2784dde"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.564841 4767 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.564895 4767 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.564913 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/506716a2-1403-4388-b6f1-d0d9c2784dde-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.809327 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd" path="/var/lib/kubelet/pods/0be7e2a2-7a75-40f6-b9dd-2b56e4f28ecd/volumes"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.810571 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c61d2b3-8e23-458b-ab4c-2e0097f3496e" path="/var/lib/kubelet/pods/2c61d2b3-8e23-458b-ab4c-2e0097f3496e/volumes"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.842943 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0acd7c9f-7969-45e3-834c-65f77c66f7df","Type":"ContainerStarted","Data":"ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9"}
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.847147 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"506716a2-1403-4388-b6f1-d0d9c2784dde","Type":"ContainerDied","Data":"767a3a5775682770896680004a0aea1c994dae3bd52a7e64aeaabd22894bd0b1"}
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.847250 4767 scope.go:117] "RemoveContainer" containerID="e6005ccc1d27f945843b2ca25cee9d515528cc03d7506f24e5979da572fa071d"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.847303 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.856489 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"532091ea-ba10-45b3-8843-bf1582e4e30e","Type":"ContainerStarted","Data":"b0b38535df2f236ca8c1490a77041f3f2e03186f57c7953675bb6d7ac51d1933"}
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.856558 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"532091ea-ba10-45b3-8843-bf1582e4e30e","Type":"ContainerStarted","Data":"37faac5a36f51ff838c0450892bd883989e7c7975542ede2170f6b436e0870af"}
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.856575 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"532091ea-ba10-45b3-8843-bf1582e4e30e","Type":"ContainerStarted","Data":"fd156abd3369ff7913178898fd0833e36d65027248b737d21a29af4b4966bf9d"}
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.888790 4767 scope.go:117] "RemoveContainer" containerID="877a9abd2aa860496637699a9dd6fbc9692539289878af2a2f4b9374dbd2fd6a"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.888890 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.906008 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.924791 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 28 18:54:18 crc kubenswrapper[4767]: E0128 18:54:18.925411 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="506716a2-1403-4388-b6f1-d0d9c2784dde" containerName="nova-api-log"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.925435 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="506716a2-1403-4388-b6f1-d0d9c2784dde" containerName="nova-api-log"
Jan 28 18:54:18 crc kubenswrapper[4767]: E0128 18:54:18.925488 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="506716a2-1403-4388-b6f1-d0d9c2784dde" containerName="nova-api-api"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.925497 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="506716a2-1403-4388-b6f1-d0d9c2784dde" containerName="nova-api-api"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.925475 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.925439911 podStartE2EDuration="2.925439911s" podCreationTimestamp="2026-01-28 18:54:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:54:18.920056672 +0000 UTC m=+1464.884239556" watchObservedRunningTime="2026-01-28 18:54:18.925439911 +0000 UTC m=+1464.889622785"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.925736 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="506716a2-1403-4388-b6f1-d0d9c2784dde" containerName="nova-api-api"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.925771 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="506716a2-1403-4388-b6f1-d0d9c2784dde" containerName="nova-api-log"
Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.931839 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.935278 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.935399 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.935577 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.964314 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 28 18:54:18 crc kubenswrapper[4767]: I0128 18:54:18.985319 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4c45g" podUID="8753bef4-0577-412e-9bf7-3b311ecda4a1" containerName="registry-server" probeResult="failure" output=< Jan 28 18:54:18 crc kubenswrapper[4767]: timeout: failed to connect service ":50051" within 1s Jan 28 18:54:18 crc kubenswrapper[4767]: > Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.008673 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.074270 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebd19321-8c26-4b0c-9303-39e4c35b9050-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0" Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.076428 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebd19321-8c26-4b0c-9303-39e4c35b9050-public-tls-certs\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0" Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.076573 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhj66\" (UniqueName: \"kubernetes.io/projected/ebd19321-8c26-4b0c-9303-39e4c35b9050-kube-api-access-dhj66\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0" Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.076661 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ebd19321-8c26-4b0c-9303-39e4c35b9050-logs\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0" Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.076733 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebd19321-8c26-4b0c-9303-39e4c35b9050-config-data\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0" Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.076908 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebd19321-8c26-4b0c-9303-39e4c35b9050-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0" Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.178729 
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.178789 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebd19321-8c26-4b0c-9303-39e4c35b9050-config-data\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0"
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.178844 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebd19321-8c26-4b0c-9303-39e4c35b9050-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0"
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.178888 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebd19321-8c26-4b0c-9303-39e4c35b9050-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0"
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.179031 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebd19321-8c26-4b0c-9303-39e4c35b9050-public-tls-certs\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0"
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.179085 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhj66\" (UniqueName: \"kubernetes.io/projected/ebd19321-8c26-4b0c-9303-39e4c35b9050-kube-api-access-dhj66\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0"
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.179383 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ebd19321-8c26-4b0c-9303-39e4c35b9050-logs\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0"
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.187097 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebd19321-8c26-4b0c-9303-39e4c35b9050-internal-tls-certs\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0"
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.188958 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ebd19321-8c26-4b0c-9303-39e4c35b9050-config-data\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0"
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.192765 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ebd19321-8c26-4b0c-9303-39e4c35b9050-public-tls-certs\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0"
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.194073 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ebd19321-8c26-4b0c-9303-39e4c35b9050-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0"
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.208908 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhj66\" (UniqueName: \"kubernetes.io/projected/ebd19321-8c26-4b0c-9303-39e4c35b9050-kube-api-access-dhj66\") pod \"nova-api-0\" (UID: \"ebd19321-8c26-4b0c-9303-39e4c35b9050\") " pod="openstack/nova-api-0"
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.270626 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.874685 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ee1ce6f4-07db-4542-bdf2-49fc980801eb","Type":"ContainerStarted","Data":"6e83f3508536139c58bb2128fa7890df6f1fed081e430d3b3f8d76c9a99e98a0"}
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.875161 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"ee1ce6f4-07db-4542-bdf2-49fc980801eb","Type":"ContainerStarted","Data":"62766c3e01eca165ca9633d2c0fad0f1b5bc9195370333e9e09006b8e4e883b5"}
Jan 28 18:54:19 crc kubenswrapper[4767]: I0128 18:54:19.906772 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.906744271 podStartE2EDuration="2.906744271s" podCreationTimestamp="2026-01-28 18:54:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:54:19.899544885 +0000 UTC m=+1465.863727779" watchObservedRunningTime="2026-01-28 18:54:19.906744271 +0000 UTC m=+1465.870927145"
Jan 28 18:54:20 crc kubenswrapper[4767]: I0128 18:54:20.540143 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 28 18:54:20 crc kubenswrapper[4767]: I0128 18:54:20.829622 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="506716a2-1403-4388-b6f1-d0d9c2784dde" path="/var/lib/kubelet/pods/506716a2-1403-4388-b6f1-d0d9c2784dde/volumes"
Jan 28 18:54:20 crc kubenswrapper[4767]: I0128 18:54:20.886613 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ebd19321-8c26-4b0c-9303-39e4c35b9050","Type":"ContainerStarted","Data":"8b02e60e2798cd5c6c2ccc538dff8433b3ce5df57de69d7328a328e62ef22dde"}
Jan 28 18:54:20 crc kubenswrapper[4767]: I0128 18:54:20.890006 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0acd7c9f-7969-45e3-834c-65f77c66f7df","Type":"ContainerStarted","Data":"5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59"}
Jan 28 18:54:21 crc kubenswrapper[4767]: I0128 18:54:21.910519 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0acd7c9f-7969-45e3-834c-65f77c66f7df","Type":"ContainerStarted","Data":"32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1"}
Jan 28 18:54:21 crc kubenswrapper[4767]: I0128 18:54:21.913332 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ebd19321-8c26-4b0c-9303-39e4c35b9050","Type":"ContainerStarted","Data":"7107206fb03f80153a221c34bde43ee0fa864a57079cfcaa6d522965a1e0633f"}
Jan 28 18:54:21 crc kubenswrapper[4767]: I0128 18:54:21.913379 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"ebd19321-8c26-4b0c-9303-39e4c35b9050","Type":"ContainerStarted","Data":"41736af330d77b529883fbf328b49f6cbf121d5889a36bc91e121789edc6fa3e"}
Jan 28 18:54:21 crc kubenswrapper[4767]: I0128 18:54:21.936548 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.9365233010000003 podStartE2EDuration="3.936523301s" podCreationTimestamp="2026-01-28 18:54:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:54:21.93553668 +0000 UTC m=+1467.899719554" watchObservedRunningTime="2026-01-28 18:54:21.936523301 +0000 UTC m=+1467.900706175"
Jan 28 18:54:22 crc kubenswrapper[4767]: I0128 18:54:22.133126 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 28 18:54:22 crc kubenswrapper[4767]: I0128 18:54:22.133236 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Jan 28 18:54:23 crc kubenswrapper[4767]: I0128 18:54:23.423552 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Jan 28 18:54:23 crc kubenswrapper[4767]: I0128 18:54:23.937897 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0acd7c9f-7969-45e3-834c-65f77c66f7df","Type":"ContainerStarted","Data":"44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545"}
Jan 28 18:54:23 crc kubenswrapper[4767]: I0128 18:54:23.938610 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 28 18:54:23 crc kubenswrapper[4767]: I0128 18:54:23.970099 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.9179001759999998 podStartE2EDuration="7.970066709s" podCreationTimestamp="2026-01-28 18:54:16 +0000 UTC" firstStartedPulling="2026-01-28 18:54:17.287464648 +0000 UTC m=+1463.251647522" lastFinishedPulling="2026-01-28 18:54:23.339631181 +0000 UTC m=+1469.303814055" observedRunningTime="2026-01-28 18:54:23.960407846 +0000 UTC m=+1469.924590740" watchObservedRunningTime="2026-01-28 18:54:23.970066709 +0000 UTC m=+1469.934249583"
Jan 28 18:54:27 crc kubenswrapper[4767]: I0128 18:54:27.133477 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Jan 28 18:54:27 crc kubenswrapper[4767]: I0128 18:54:27.134329 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Jan 28 18:54:27 crc kubenswrapper[4767]: I0128 18:54:27.923061 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4c45g"
Jan 28 18:54:27 crc kubenswrapper[4767]: I0128 18:54:27.989932 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4c45g"
Jan 28 18:54:28 crc kubenswrapper[4767]: I0128 18:54:28.152687 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="532091ea-ba10-45b3-8843-bf1582e4e30e" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.216:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:54:28 crc kubenswrapper[4767]: I0128 18:54:28.152735 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="532091ea-ba10-45b3-8843-bf1582e4e30e" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.216:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:54:28 crc kubenswrapper[4767]: I0128 18:54:28.423561 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Jan 28 18:54:28 crc kubenswrapper[4767]: I0128 18:54:28.459923 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Jan 28 18:54:28 crc kubenswrapper[4767]: I0128 18:54:28.575483 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4c45g"]
Jan 28 18:54:28 crc kubenswrapper[4767]: I0128 18:54:28.996363 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4c45g" podUID="8753bef4-0577-412e-9bf7-3b311ecda4a1" containerName="registry-server" containerID="cri-o://790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5" gracePeriod=2
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.035023 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.271799 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.271881 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.710774 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4c45g"
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.779489 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8753bef4-0577-412e-9bf7-3b311ecda4a1-utilities\") pod \"8753bef4-0577-412e-9bf7-3b311ecda4a1\" (UID: \"8753bef4-0577-412e-9bf7-3b311ecda4a1\") "
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.779859 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8753bef4-0577-412e-9bf7-3b311ecda4a1-catalog-content\") pod \"8753bef4-0577-412e-9bf7-3b311ecda4a1\" (UID: \"8753bef4-0577-412e-9bf7-3b311ecda4a1\") "
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.779958 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8cg4x\" (UniqueName: \"kubernetes.io/projected/8753bef4-0577-412e-9bf7-3b311ecda4a1-kube-api-access-8cg4x\") pod \"8753bef4-0577-412e-9bf7-3b311ecda4a1\" (UID: \"8753bef4-0577-412e-9bf7-3b311ecda4a1\") "
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.780827 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8753bef4-0577-412e-9bf7-3b311ecda4a1-utilities" (OuterVolumeSpecName: "utilities") pod "8753bef4-0577-412e-9bf7-3b311ecda4a1" (UID: "8753bef4-0577-412e-9bf7-3b311ecda4a1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.790585 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8753bef4-0577-412e-9bf7-3b311ecda4a1-kube-api-access-8cg4x" (OuterVolumeSpecName: "kube-api-access-8cg4x") pod "8753bef4-0577-412e-9bf7-3b311ecda4a1" (UID: "8753bef4-0577-412e-9bf7-3b311ecda4a1"). InnerVolumeSpecName "kube-api-access-8cg4x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.883698 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8753bef4-0577-412e-9bf7-3b311ecda4a1-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.883751 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8cg4x\" (UniqueName: \"kubernetes.io/projected/8753bef4-0577-412e-9bf7-3b311ecda4a1-kube-api-access-8cg4x\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.932747 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8753bef4-0577-412e-9bf7-3b311ecda4a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8753bef4-0577-412e-9bf7-3b311ecda4a1" (UID: "8753bef4-0577-412e-9bf7-3b311ecda4a1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 18:54:29 crc kubenswrapper[4767]: I0128 18:54:29.985663 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8753bef4-0577-412e-9bf7-3b311ecda4a1-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.012041 4767 generic.go:334] "Generic (PLEG): container finished" podID="8753bef4-0577-412e-9bf7-3b311ecda4a1" containerID="790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5" exitCode=0
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.012512 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4c45g"
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.012602 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4c45g" event={"ID":"8753bef4-0577-412e-9bf7-3b311ecda4a1","Type":"ContainerDied","Data":"790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5"}
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.012756 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4c45g" event={"ID":"8753bef4-0577-412e-9bf7-3b311ecda4a1","Type":"ContainerDied","Data":"702433f8e5fe0c1b017acf6da47017f47b9fd026bfd4f697665aa4a85631a3d1"}
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.012790 4767 scope.go:117] "RemoveContainer" containerID="790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5"
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.069440 4767 scope.go:117] "RemoveContainer" containerID="a216f76d1357e21709b5370c9480602fce0edfd344219ae867b3614fcbca5ecf"
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.076869 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4c45g"]
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.088780 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4c45g"]
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.107912 4767 scope.go:117] "RemoveContainer" containerID="d9fa5f09c2568ccc74736575e8bcf850ed55a914fad218414661cecf0fd1aa07"
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.171807 4767 scope.go:117] "RemoveContainer" containerID="790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5"
Jan 28 18:54:30 crc kubenswrapper[4767]: E0128 18:54:30.172474 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5\": container with ID starting with 790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5 not found: ID does not exist" containerID="790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5"
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.172540 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5"} err="failed to get container status \"790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5\": rpc error: code = NotFound desc = could not find container \"790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5\": container with ID starting with 790e6acb8ba84a0f1058d6b8431b1fa222f859dd98803c74348d776651870cb5 not found: ID does not exist"
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.172605 4767 scope.go:117] "RemoveContainer" containerID="a216f76d1357e21709b5370c9480602fce0edfd344219ae867b3614fcbca5ecf"
Jan 28 18:54:30 crc kubenswrapper[4767]: E0128 18:54:30.173197 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a216f76d1357e21709b5370c9480602fce0edfd344219ae867b3614fcbca5ecf\": container with ID starting with a216f76d1357e21709b5370c9480602fce0edfd344219ae867b3614fcbca5ecf not found: ID does not exist" containerID="a216f76d1357e21709b5370c9480602fce0edfd344219ae867b3614fcbca5ecf"
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.173240 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a216f76d1357e21709b5370c9480602fce0edfd344219ae867b3614fcbca5ecf"} err="failed to get container status \"a216f76d1357e21709b5370c9480602fce0edfd344219ae867b3614fcbca5ecf\": rpc error: code = NotFound desc = could not find container \"a216f76d1357e21709b5370c9480602fce0edfd344219ae867b3614fcbca5ecf\": container with ID starting with a216f76d1357e21709b5370c9480602fce0edfd344219ae867b3614fcbca5ecf not found: ID does not exist"
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.173260 4767 scope.go:117] "RemoveContainer" containerID="d9fa5f09c2568ccc74736575e8bcf850ed55a914fad218414661cecf0fd1aa07"
Jan 28 18:54:30 crc kubenswrapper[4767]: E0128 18:54:30.179894 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d9fa5f09c2568ccc74736575e8bcf850ed55a914fad218414661cecf0fd1aa07\": container with ID starting with d9fa5f09c2568ccc74736575e8bcf850ed55a914fad218414661cecf0fd1aa07 not found: ID does not exist" containerID="d9fa5f09c2568ccc74736575e8bcf850ed55a914fad218414661cecf0fd1aa07"
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.179971 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9fa5f09c2568ccc74736575e8bcf850ed55a914fad218414661cecf0fd1aa07"} err="failed to get container status \"d9fa5f09c2568ccc74736575e8bcf850ed55a914fad218414661cecf0fd1aa07\": rpc error: code = NotFound desc = could not find container \"d9fa5f09c2568ccc74736575e8bcf850ed55a914fad218414661cecf0fd1aa07\": container with ID starting with d9fa5f09c2568ccc74736575e8bcf850ed55a914fad218414661cecf0fd1aa07 not found: ID does not exist"
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.291582 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ebd19321-8c26-4b0c-9303-39e4c35b9050" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.218:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.291607 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="ebd19321-8c26-4b0c-9303-39e4c35b9050" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.218:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Jan 28 18:54:30 crc kubenswrapper[4767]: I0128 18:54:30.810894 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8753bef4-0577-412e-9bf7-3b311ecda4a1" path="/var/lib/kubelet/pods/8753bef4-0577-412e-9bf7-3b311ecda4a1/volumes"
Jan 28 18:54:37 crc kubenswrapper[4767]: I0128 18:54:37.142111 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 28 18:54:37 crc kubenswrapper[4767]: I0128 18:54:37.142978 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 28 18:54:37 crc kubenswrapper[4767]: I0128 18:54:37.154353 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 28 18:54:37 crc kubenswrapper[4767]: I0128 18:54:37.154911 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 28 18:54:39 crc kubenswrapper[4767]: I0128 18:54:39.285060 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 28 18:54:39 crc kubenswrapper[4767]: I0128 18:54:39.286364 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0"
Jan 28 18:54:39 crc kubenswrapper[4767]: I0128 18:54:39.286860 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 28 18:54:39 crc kubenswrapper[4767]: I0128 18:54:39.286964 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0"
Jan 28 18:54:39 crc kubenswrapper[4767]: I0128 18:54:39.296853 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 28 18:54:39 crc kubenswrapper[4767]: I0128 18:54:39.297469 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0"
Jan 28 18:54:46 crc kubenswrapper[4767]: I0128 18:54:46.451267 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0"
Jan 28 18:54:51 crc kubenswrapper[4767]: I0128 18:54:51.036781 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 28 18:54:51 crc kubenswrapper[4767]: I0128 18:54:51.037958 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="2c76158e-431b-4588-962f-0ac42843de29" containerName="kube-state-metrics" containerID="cri-o://c9391ee18926faa8836982f4136f94d531902a4acc1983c993ea67269e0cabbb" gracePeriod=30
Jan 28 18:54:51 crc kubenswrapper[4767]: I0128 18:54:51.287820 4767 generic.go:334] "Generic (PLEG): container finished" podID="2c76158e-431b-4588-962f-0ac42843de29" containerID="c9391ee18926faa8836982f4136f94d531902a4acc1983c993ea67269e0cabbb" exitCode=2
Jan 28 18:54:51 crc kubenswrapper[4767]: I0128 18:54:51.287940 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"2c76158e-431b-4588-962f-0ac42843de29","Type":"ContainerDied","Data":"c9391ee18926faa8836982f4136f94d531902a4acc1983c993ea67269e0cabbb"}
Jan 28 18:54:51 crc kubenswrapper[4767]: I0128 18:54:51.670888 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 28 18:54:51 crc kubenswrapper[4767]: I0128 18:54:51.833073 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nsm6\" (UniqueName: \"kubernetes.io/projected/2c76158e-431b-4588-962f-0ac42843de29-kube-api-access-5nsm6\") pod \"2c76158e-431b-4588-962f-0ac42843de29\" (UID: \"2c76158e-431b-4588-962f-0ac42843de29\") "
Jan 28 18:54:51 crc kubenswrapper[4767]: I0128 18:54:51.860413 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c76158e-431b-4588-962f-0ac42843de29-kube-api-access-5nsm6" (OuterVolumeSpecName: "kube-api-access-5nsm6") pod "2c76158e-431b-4588-962f-0ac42843de29" (UID: "2c76158e-431b-4588-962f-0ac42843de29"). InnerVolumeSpecName "kube-api-access-5nsm6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 18:54:51 crc kubenswrapper[4767]: I0128 18:54:51.938159 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nsm6\" (UniqueName: \"kubernetes.io/projected/2c76158e-431b-4588-962f-0ac42843de29-kube-api-access-5nsm6\") on node \"crc\" DevicePath \"\""
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.299648 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"2c76158e-431b-4588-962f-0ac42843de29","Type":"ContainerDied","Data":"242e5fcded1d5c7e2950a54e1c57d78d77f10b38de5101452f728d625d0c117f"}
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.299732 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.300151 4767 scope.go:117] "RemoveContainer" containerID="c9391ee18926faa8836982f4136f94d531902a4acc1983c993ea67269e0cabbb"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.349099 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.362667 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.376443 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 28 18:54:52 crc kubenswrapper[4767]: E0128 18:54:52.378375 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8753bef4-0577-412e-9bf7-3b311ecda4a1" containerName="extract-utilities"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.378409 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8753bef4-0577-412e-9bf7-3b311ecda4a1" containerName="extract-utilities"
Jan 28 18:54:52 crc kubenswrapper[4767]: E0128 18:54:52.378427 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c76158e-431b-4588-962f-0ac42843de29" containerName="kube-state-metrics"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.378437 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c76158e-431b-4588-962f-0ac42843de29" containerName="kube-state-metrics"
Jan 28 18:54:52 crc kubenswrapper[4767]: E0128 18:54:52.378459 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8753bef4-0577-412e-9bf7-3b311ecda4a1" containerName="registry-server"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.378470 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8753bef4-0577-412e-9bf7-3b311ecda4a1" containerName="registry-server"
Jan 28 18:54:52 crc kubenswrapper[4767]: E0128 18:54:52.378499 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8753bef4-0577-412e-9bf7-3b311ecda4a1" containerName="extract-content"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.378508 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8753bef4-0577-412e-9bf7-3b311ecda4a1" containerName="extract-content"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.378774 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8753bef4-0577-412e-9bf7-3b311ecda4a1" containerName="registry-server"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.378797 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c76158e-431b-4588-962f-0ac42843de29" containerName="kube-state-metrics"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.379756 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.383728 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.394591 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.395223 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.451620 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df92d3bd-ff9c-4df6-b783-7488249daa20-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.451676 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/df92d3bd-ff9c-4df6-b783-7488249daa20-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.451715 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76678\" (UniqueName: \"kubernetes.io/projected/df92d3bd-ff9c-4df6-b783-7488249daa20-kube-api-access-76678\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.451870 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/df92d3bd-ff9c-4df6-b783-7488249daa20-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.554820 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df92d3bd-ff9c-4df6-b783-7488249daa20-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.554908 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/df92d3bd-ff9c-4df6-b783-7488249daa20-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.554952 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76678\" (UniqueName: \"kubernetes.io/projected/df92d3bd-ff9c-4df6-b783-7488249daa20-kube-api-access-76678\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0"
Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.555004 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/df92d3bd-ff9c-4df6-b783-7488249daa20-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0"
\"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/df92d3bd-ff9c-4df6-b783-7488249daa20-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0" Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.565418 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df92d3bd-ff9c-4df6-b783-7488249daa20-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0" Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.566860 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/df92d3bd-ff9c-4df6-b783-7488249daa20-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0" Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.578975 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76678\" (UniqueName: \"kubernetes.io/projected/df92d3bd-ff9c-4df6-b783-7488249daa20-kube-api-access-76678\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0" Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.585606 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/df92d3bd-ff9c-4df6-b783-7488249daa20-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"df92d3bd-ff9c-4df6-b783-7488249daa20\") " pod="openstack/kube-state-metrics-0" Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.701671 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 28 18:54:52 crc kubenswrapper[4767]: I0128 18:54:52.899895 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c76158e-431b-4588-962f-0ac42843de29" path="/var/lib/kubelet/pods/2c76158e-431b-4588-962f-0ac42843de29/volumes" Jan 28 18:54:53 crc kubenswrapper[4767]: I0128 18:54:53.276260 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 28 18:54:53 crc kubenswrapper[4767]: I0128 18:54:53.317474 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"df92d3bd-ff9c-4df6-b783-7488249daa20","Type":"ContainerStarted","Data":"c6d8a3ece53b362e86bcb4f2563e7c1b10002a1d3bef97297e2239244dd28f9d"} Jan 28 18:54:53 crc kubenswrapper[4767]: I0128 18:54:53.755431 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:54:53 crc kubenswrapper[4767]: I0128 18:54:53.755804 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="ceilometer-central-agent" containerID="cri-o://ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9" gracePeriod=30 Jan 28 18:54:53 crc kubenswrapper[4767]: I0128 18:54:53.755881 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="proxy-httpd" containerID="cri-o://44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545" gracePeriod=30 Jan 28 18:54:53 crc kubenswrapper[4767]: I0128 18:54:53.755991 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="ceilometer-notification-agent" containerID="cri-o://5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59" gracePeriod=30 Jan 28 18:54:53 crc kubenswrapper[4767]: I0128 18:54:53.756095 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="sg-core" containerID="cri-o://32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1" gracePeriod=30 Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.331488 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"df92d3bd-ff9c-4df6-b783-7488249daa20","Type":"ContainerStarted","Data":"6ac3f5cfdeb118323c100357a30d06cc3d833f6b225c7928cbf770e6389bb954"} Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.331659 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.336030 4767 generic.go:334] "Generic (PLEG): container finished" podID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerID="44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545" exitCode=0 Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.336073 4767 generic.go:334] "Generic (PLEG): container finished" podID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerID="32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1" exitCode=2 Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.336099 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"0acd7c9f-7969-45e3-834c-65f77c66f7df","Type":"ContainerDied","Data":"44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545"} Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.336130 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0acd7c9f-7969-45e3-834c-65f77c66f7df","Type":"ContainerDied","Data":"32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1"} Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.358334 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=1.724776292 podStartE2EDuration="2.358309039s" podCreationTimestamp="2026-01-28 18:54:52 +0000 UTC" firstStartedPulling="2026-01-28 18:54:53.282979971 +0000 UTC m=+1499.247162845" lastFinishedPulling="2026-01-28 18:54:53.916512718 +0000 UTC m=+1499.880695592" observedRunningTime="2026-01-28 18:54:54.352225048 +0000 UTC m=+1500.316407942" watchObservedRunningTime="2026-01-28 18:54:54.358309039 +0000 UTC m=+1500.322491913" Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.878842 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.914407 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-sg-core-conf-yaml\") pod \"0acd7c9f-7969-45e3-834c-65f77c66f7df\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.914469 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0acd7c9f-7969-45e3-834c-65f77c66f7df-run-httpd\") pod \"0acd7c9f-7969-45e3-834c-65f77c66f7df\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.914497 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-combined-ca-bundle\") pod \"0acd7c9f-7969-45e3-834c-65f77c66f7df\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.914577 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gcf4j\" (UniqueName: \"kubernetes.io/projected/0acd7c9f-7969-45e3-834c-65f77c66f7df-kube-api-access-gcf4j\") pod \"0acd7c9f-7969-45e3-834c-65f77c66f7df\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.914645 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-scripts\") pod \"0acd7c9f-7969-45e3-834c-65f77c66f7df\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.914703 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-config-data\") pod \"0acd7c9f-7969-45e3-834c-65f77c66f7df\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.914772 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/0acd7c9f-7969-45e3-834c-65f77c66f7df-log-httpd\") pod \"0acd7c9f-7969-45e3-834c-65f77c66f7df\" (UID: \"0acd7c9f-7969-45e3-834c-65f77c66f7df\") " Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.918415 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0acd7c9f-7969-45e3-834c-65f77c66f7df-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0acd7c9f-7969-45e3-834c-65f77c66f7df" (UID: "0acd7c9f-7969-45e3-834c-65f77c66f7df"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.918938 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0acd7c9f-7969-45e3-834c-65f77c66f7df-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0acd7c9f-7969-45e3-834c-65f77c66f7df" (UID: "0acd7c9f-7969-45e3-834c-65f77c66f7df"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.929195 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0acd7c9f-7969-45e3-834c-65f77c66f7df-kube-api-access-gcf4j" (OuterVolumeSpecName: "kube-api-access-gcf4j") pod "0acd7c9f-7969-45e3-834c-65f77c66f7df" (UID: "0acd7c9f-7969-45e3-834c-65f77c66f7df"). InnerVolumeSpecName "kube-api-access-gcf4j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:54:54 crc kubenswrapper[4767]: I0128 18:54:54.958526 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-scripts" (OuterVolumeSpecName: "scripts") pod "0acd7c9f-7969-45e3-834c-65f77c66f7df" (UID: "0acd7c9f-7969-45e3-834c-65f77c66f7df"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.026422 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0acd7c9f-7969-45e3-834c-65f77c66f7df-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.026490 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gcf4j\" (UniqueName: \"kubernetes.io/projected/0acd7c9f-7969-45e3-834c-65f77c66f7df-kube-api-access-gcf4j\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.026505 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.026518 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0acd7c9f-7969-45e3-834c-65f77c66f7df-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.048359 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0acd7c9f-7969-45e3-834c-65f77c66f7df" (UID: "0acd7c9f-7969-45e3-834c-65f77c66f7df"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.115443 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0acd7c9f-7969-45e3-834c-65f77c66f7df" (UID: "0acd7c9f-7969-45e3-834c-65f77c66f7df"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.139860 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.139898 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.237583 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-config-data" (OuterVolumeSpecName: "config-data") pod "0acd7c9f-7969-45e3-834c-65f77c66f7df" (UID: "0acd7c9f-7969-45e3-834c-65f77c66f7df"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.244134 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0acd7c9f-7969-45e3-834c-65f77c66f7df-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.353420 4767 generic.go:334] "Generic (PLEG): container finished" podID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerID="5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59" exitCode=0 Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.353455 4767 generic.go:334] "Generic (PLEG): container finished" podID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerID="ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9" exitCode=0 Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.353503 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0acd7c9f-7969-45e3-834c-65f77c66f7df","Type":"ContainerDied","Data":"5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59"} Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.353574 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0acd7c9f-7969-45e3-834c-65f77c66f7df","Type":"ContainerDied","Data":"ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9"} Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.353590 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0acd7c9f-7969-45e3-834c-65f77c66f7df","Type":"ContainerDied","Data":"55c4da1a6ae9320a77309b1ebfa0a83008b30f3327de814fd4e19d5180a06e27"} Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.353615 4767 scope.go:117] "RemoveContainer" containerID="44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.353521 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.384748 4767 scope.go:117] "RemoveContainer" containerID="32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.412480 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.425485 4767 scope.go:117] "RemoveContainer" containerID="5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.429964 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.457301 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:54:55 crc kubenswrapper[4767]: E0128 18:54:55.457907 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="ceilometer-central-agent" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.457931 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="ceilometer-central-agent" Jan 28 18:54:55 crc kubenswrapper[4767]: E0128 18:54:55.457951 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="proxy-httpd" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.457964 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="proxy-httpd" Jan 28 18:54:55 crc kubenswrapper[4767]: E0128 18:54:55.457987 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="sg-core" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.457999 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="sg-core" Jan 28 18:54:55 crc kubenswrapper[4767]: E0128 18:54:55.458021 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="ceilometer-notification-agent" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.458037 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="ceilometer-notification-agent" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.458407 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="sg-core" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.458439 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="ceilometer-central-agent" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.458459 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="proxy-httpd" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.458482 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" containerName="ceilometer-notification-agent" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.460749 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.466733 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.467014 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.467033 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.467178 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.473142 4767 scope.go:117] "RemoveContainer" containerID="ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.537817 4767 scope.go:117] "RemoveContainer" containerID="44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545" Jan 28 18:54:55 crc kubenswrapper[4767]: E0128 18:54:55.540302 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545\": container with ID starting with 44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545 not found: ID does not exist" containerID="44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.540368 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545"} err="failed to get container status \"44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545\": rpc error: code = NotFound desc = could not find container \"44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545\": container with ID starting with 44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545 not found: ID does not exist" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.540429 4767 scope.go:117] "RemoveContainer" containerID="32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1" Jan 28 18:54:55 crc kubenswrapper[4767]: E0128 18:54:55.541309 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1\": container with ID starting with 32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1 not found: ID does not exist" containerID="32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.541369 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1"} err="failed to get container status \"32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1\": rpc error: code = NotFound desc = could not find container \"32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1\": container with ID starting with 32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1 not found: ID does not exist" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.541400 4767 scope.go:117] "RemoveContainer" containerID="5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59" Jan 28 18:54:55 
crc kubenswrapper[4767]: E0128 18:54:55.542631 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59\": container with ID starting with 5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59 not found: ID does not exist" containerID="5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.543350 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59"} err="failed to get container status \"5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59\": rpc error: code = NotFound desc = could not find container \"5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59\": container with ID starting with 5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59 not found: ID does not exist" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.543438 4767 scope.go:117] "RemoveContainer" containerID="ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9" Jan 28 18:54:55 crc kubenswrapper[4767]: E0128 18:54:55.548167 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9\": container with ID starting with ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9 not found: ID does not exist" containerID="ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.548287 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9"} err="failed to get container status \"ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9\": rpc error: code = NotFound desc = could not find container \"ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9\": container with ID starting with ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9 not found: ID does not exist" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.548336 4767 scope.go:117] "RemoveContainer" containerID="44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.549769 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545"} err="failed to get container status \"44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545\": rpc error: code = NotFound desc = could not find container \"44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545\": container with ID starting with 44cad2803332dc86b280b8940b34dfa1f0a2d3efb85bc0c67fd99c62c4810545 not found: ID does not exist" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.549826 4767 scope.go:117] "RemoveContainer" containerID="32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.550281 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1"} err="failed to get container status 
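
Every "ContainerStatus from runtime service failed" / "DeleteContainer returned error" pair above is the benign case: the container was already removed, CRI-O answers the follow-up call with gRPC NotFound, and the kubelet logs it and moves on, so repeated RemoveContainer attempts stay idempotent. A sketch of that tolerance, assuming a runtime client that surfaces gRPC status errors (removeContainer here is a stand-in, not the real CRI API):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeContainer stands in for a CRI RemoveContainer call; here it always
// reports the ID as missing, like the runtime does after the first delete.
func removeContainer(id string) error {
	return status.Errorf(codes.NotFound, "could not find container %q", id)
}

func cleanup(id string) error {
	if err := removeContainer(id); err != nil {
		if status.Code(err) == codes.NotFound {
			// Already gone: the desired state (no container) holds, so succeed.
			fmt.Printf("container %s already removed, ignoring\n", id)
			return nil
		}
		return err
	}
	return nil
}

func main() {
	_ = cleanup("44cad2803332")
}
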
\"32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1\": rpc error: code = NotFound desc = could not find container \"32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1\": container with ID starting with 32740d15cc0acbbb5725f5d808d1457fcaec4e07c1de41bc1c75270776ad03d1 not found: ID does not exist" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.550316 4767 scope.go:117] "RemoveContainer" containerID="5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.550688 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59"} err="failed to get container status \"5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59\": rpc error: code = NotFound desc = could not find container \"5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59\": container with ID starting with 5805fc456efa262c25c7561e65884461bf3fd217cc3c0e4baebaad7871d76d59 not found: ID does not exist" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.550734 4767 scope.go:117] "RemoveContainer" containerID="ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.551784 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9"} err="failed to get container status \"ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9\": rpc error: code = NotFound desc = could not find container \"ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9\": container with ID starting with ef8be2ed6ef0067c8e62fad60f420b0f08e3e3f41484c5f22eb7e389bb654fb9 not found: ID does not exist" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.554347 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6740b200-cccc-45bb-84c7-a524cf79fcff-run-httpd\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.554520 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9v9g2\" (UniqueName: \"kubernetes.io/projected/6740b200-cccc-45bb-84c7-a524cf79fcff-kube-api-access-9v9g2\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.555449 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.555576 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.555631 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.555735 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-config-data\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.556069 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6740b200-cccc-45bb-84c7-a524cf79fcff-log-httpd\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.556174 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-scripts\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.660442 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6740b200-cccc-45bb-84c7-a524cf79fcff-run-httpd\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.661465 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9v9g2\" (UniqueName: \"kubernetes.io/projected/6740b200-cccc-45bb-84c7-a524cf79fcff-kube-api-access-9v9g2\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.661560 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.661616 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.661641 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.661688 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-config-data\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.661817 4767 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6740b200-cccc-45bb-84c7-a524cf79fcff-log-httpd\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.661865 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-scripts\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.661171 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6740b200-cccc-45bb-84c7-a524cf79fcff-run-httpd\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.666065 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6740b200-cccc-45bb-84c7-a524cf79fcff-log-httpd\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.668111 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-scripts\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.669596 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.670342 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.670878 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-config-data\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.678811 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.692112 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9v9g2\" (UniqueName: \"kubernetes.io/projected/6740b200-cccc-45bb-84c7-a524cf79fcff-kube-api-access-9v9g2\") pod \"ceilometer-0\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " pod="openstack/ceilometer-0" Jan 28 18:54:55 crc kubenswrapper[4767]: I0128 18:54:55.798336 4767 util.go:30] "No sandbox for pod can be found. 
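
The replacement ceilometer-0 pod (UID 6740b200-...) now walks the mount side of the volume reconciler: VerifyControllerAttachedVolume, then MountVolume started, then a MountVolume.SetUp succeeded per volume; the sandbox cannot start until the desired set is fully mounted. A minimal set-difference sketch of that desired-versus-actual loop; nothing here is kubelet code:

package main

import "fmt"

// reconcile drives mounts toward the desired set and unmounts what is no
// longer wanted, the same shape as the "started"/"succeeded" pairs above.
func reconcile(desired, mounted map[string]bool) {
	for v := range desired {
		if !mounted[v] {
			fmt.Println("MountVolume started for", v)
			mounted[v] = true // stand-in for the plugin's SetUp call
		}
	}
	for v := range mounted {
		if !desired[v] {
			fmt.Println("UnmountVolume started for", v)
			delete(mounted, v)
		}
	}
}

func main() {
	desired := map[string]bool{"run-httpd": true, "scripts": true, "config-data": true}
	mounted := map[string]bool{"scripts": true}
	reconcile(desired, mounted)
}
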
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 18:54:56 crc kubenswrapper[4767]: I0128 18:54:56.349282 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 18:54:56 crc kubenswrapper[4767]: W0128 18:54:56.372510 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6740b200_cccc_45bb_84c7_a524cf79fcff.slice/crio-a6aae9074650910a6e1718660d7339bfb36d1dd788e537f7bf38b750af15c5af WatchSource:0}: Error finding container a6aae9074650910a6e1718660d7339bfb36d1dd788e537f7bf38b750af15c5af: Status 404 returned error can't find the container with id a6aae9074650910a6e1718660d7339bfb36d1dd788e537f7bf38b750af15c5af Jan 28 18:54:56 crc kubenswrapper[4767]: I0128 18:54:56.814712 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0acd7c9f-7969-45e3-834c-65f77c66f7df" path="/var/lib/kubelet/pods/0acd7c9f-7969-45e3-834c-65f77c66f7df/volumes" Jan 28 18:54:57 crc kubenswrapper[4767]: I0128 18:54:57.379650 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6740b200-cccc-45bb-84c7-a524cf79fcff","Type":"ContainerStarted","Data":"2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced"} Jan 28 18:54:57 crc kubenswrapper[4767]: I0128 18:54:57.380153 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6740b200-cccc-45bb-84c7-a524cf79fcff","Type":"ContainerStarted","Data":"a6aae9074650910a6e1718660d7339bfb36d1dd788e537f7bf38b750af15c5af"} Jan 28 18:54:58 crc kubenswrapper[4767]: I0128 18:54:58.395920 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6740b200-cccc-45bb-84c7-a524cf79fcff","Type":"ContainerStarted","Data":"d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300"} Jan 28 18:54:58 crc kubenswrapper[4767]: I0128 18:54:58.658845 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 18:54:59 crc kubenswrapper[4767]: I0128 18:54:59.415479 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6740b200-cccc-45bb-84c7-a524cf79fcff","Type":"ContainerStarted","Data":"6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb"} Jan 28 18:55:00 crc kubenswrapper[4767]: I0128 18:55:00.147910 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 18:55:01 crc kubenswrapper[4767]: I0128 18:55:01.457532 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6740b200-cccc-45bb-84c7-a524cf79fcff","Type":"ContainerStarted","Data":"ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b"} Jan 28 18:55:01 crc kubenswrapper[4767]: I0128 18:55:01.458660 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 18:55:01 crc kubenswrapper[4767]: I0128 18:55:01.495064 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.9898850129999999 podStartE2EDuration="6.495040666s" podCreationTimestamp="2026-01-28 18:54:55 +0000 UTC" firstStartedPulling="2026-01-28 18:54:56.376874052 +0000 UTC m=+1502.341056926" lastFinishedPulling="2026-01-28 18:55:00.882029695 +0000 UTC m=+1506.846212579" observedRunningTime="2026-01-28 18:55:01.491717311 +0000 UTC m=+1507.455900205" watchObservedRunningTime="2026-01-28 18:55:01.495040666 +0000 
UTC m=+1507.459223540" Jan 28 18:55:02 crc kubenswrapper[4767]: I0128 18:55:02.721141 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 28 18:55:05 crc kubenswrapper[4767]: I0128 18:55:05.197896 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="bbea8b85-5bb2-4570-83e7-07dafaade001" containerName="rabbitmq" containerID="cri-o://1dbdc25ff42287511a85e0c77517dcd153c4a98843b04855b2fd4416b5b4eeb7" gracePeriod=604794 Jan 28 18:55:06 crc kubenswrapper[4767]: I0128 18:55:06.102564 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" containerName="rabbitmq" containerID="cri-o://0b430da8b0b7f87d140a51b6b6f8674570a1d1fc69beb8d27fe5333716e57cc7" gracePeriod=604795 Jan 28 18:55:08 crc kubenswrapper[4767]: I0128 18:55:08.320032 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="bbea8b85-5bb2-4570-83e7-07dafaade001" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Jan 28 18:55:08 crc kubenswrapper[4767]: I0128 18:55:08.460051 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused" Jan 28 18:55:11 crc kubenswrapper[4767]: I0128 18:55:11.573607 4767 generic.go:334] "Generic (PLEG): container finished" podID="bbea8b85-5bb2-4570-83e7-07dafaade001" containerID="1dbdc25ff42287511a85e0c77517dcd153c4a98843b04855b2fd4416b5b4eeb7" exitCode=0 Jan 28 18:55:11 crc kubenswrapper[4767]: I0128 18:55:11.573794 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"bbea8b85-5bb2-4570-83e7-07dafaade001","Type":"ContainerDied","Data":"1dbdc25ff42287511a85e0c77517dcd153c4a98843b04855b2fd4416b5b4eeb7"} Jan 28 18:55:11 crc kubenswrapper[4767]: I0128 18:55:11.926769 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.110101 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bbea8b85-5bb2-4570-83e7-07dafaade001-pod-info\") pod \"bbea8b85-5bb2-4570-83e7-07dafaade001\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.112361 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-plugins\") pod \"bbea8b85-5bb2-4570-83e7-07dafaade001\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.112529 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bbea8b85-5bb2-4570-83e7-07dafaade001-erlang-cookie-secret\") pod \"bbea8b85-5bb2-4570-83e7-07dafaade001\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.113093 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "bbea8b85-5bb2-4570-83e7-07dafaade001" (UID: "bbea8b85-5bb2-4570-83e7-07dafaade001"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.113612 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-tls\") pod \"bbea8b85-5bb2-4570-83e7-07dafaade001\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.113704 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-erlang-cookie\") pod \"bbea8b85-5bb2-4570-83e7-07dafaade001\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.113883 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-server-conf\") pod \"bbea8b85-5bb2-4570-83e7-07dafaade001\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.114027 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-plugins-conf\") pod \"bbea8b85-5bb2-4570-83e7-07dafaade001\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.114100 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"bbea8b85-5bb2-4570-83e7-07dafaade001\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.114151 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krbbr\" (UniqueName: 
\"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-kube-api-access-krbbr\") pod \"bbea8b85-5bb2-4570-83e7-07dafaade001\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.114184 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-confd\") pod \"bbea8b85-5bb2-4570-83e7-07dafaade001\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.114266 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-config-data\") pod \"bbea8b85-5bb2-4570-83e7-07dafaade001\" (UID: \"bbea8b85-5bb2-4570-83e7-07dafaade001\") " Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.115344 4767 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.114257 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "bbea8b85-5bb2-4570-83e7-07dafaade001" (UID: "bbea8b85-5bb2-4570-83e7-07dafaade001"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.115055 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "bbea8b85-5bb2-4570-83e7-07dafaade001" (UID: "bbea8b85-5bb2-4570-83e7-07dafaade001"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.121649 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bbea8b85-5bb2-4570-83e7-07dafaade001-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "bbea8b85-5bb2-4570-83e7-07dafaade001" (UID: "bbea8b85-5bb2-4570-83e7-07dafaade001"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.121666 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "bbea8b85-5bb2-4570-83e7-07dafaade001" (UID: "bbea8b85-5bb2-4570-83e7-07dafaade001"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.122727 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "bbea8b85-5bb2-4570-83e7-07dafaade001" (UID: "bbea8b85-5bb2-4570-83e7-07dafaade001"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.123255 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/bbea8b85-5bb2-4570-83e7-07dafaade001-pod-info" (OuterVolumeSpecName: "pod-info") pod "bbea8b85-5bb2-4570-83e7-07dafaade001" (UID: "bbea8b85-5bb2-4570-83e7-07dafaade001"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.123979 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-kube-api-access-krbbr" (OuterVolumeSpecName: "kube-api-access-krbbr") pod "bbea8b85-5bb2-4570-83e7-07dafaade001" (UID: "bbea8b85-5bb2-4570-83e7-07dafaade001"). InnerVolumeSpecName "kube-api-access-krbbr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.173850 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-config-data" (OuterVolumeSpecName: "config-data") pod "bbea8b85-5bb2-4570-83e7-07dafaade001" (UID: "bbea8b85-5bb2-4570-83e7-07dafaade001"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.207619 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-server-conf" (OuterVolumeSpecName: "server-conf") pod "bbea8b85-5bb2-4570-83e7-07dafaade001" (UID: "bbea8b85-5bb2-4570-83e7-07dafaade001"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.218238 4767 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.218836 4767 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.218857 4767 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-server-conf\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.218872 4767 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.218922 4767 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.218938 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krbbr\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-kube-api-access-krbbr\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.218952 4767 reconciler_common.go:293] "Volume detached for volume 
\"config-data\" (UniqueName: \"kubernetes.io/configmap/bbea8b85-5bb2-4570-83e7-07dafaade001-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.218965 4767 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/bbea8b85-5bb2-4570-83e7-07dafaade001-pod-info\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.218978 4767 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/bbea8b85-5bb2-4570-83e7-07dafaade001-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.298732 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "bbea8b85-5bb2-4570-83e7-07dafaade001" (UID: "bbea8b85-5bb2-4570-83e7-07dafaade001"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.304444 4767 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.323127 4767 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.323180 4767 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/bbea8b85-5bb2-4570-83e7-07dafaade001-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:12 crc kubenswrapper[4767]: E0128 18:55:12.574837 4767 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddedf0cb1_05f8_47b2_b47b_b6de3d563cfd.slice/crio-0b430da8b0b7f87d140a51b6b6f8674570a1d1fc69beb8d27fe5333716e57cc7.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddedf0cb1_05f8_47b2_b47b_b6de3d563cfd.slice/crio-conmon-0b430da8b0b7f87d140a51b6b6f8674570a1d1fc69beb8d27fe5333716e57cc7.scope\": RecentStats: unable to find data in memory cache]" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.619077 4767 generic.go:334] "Generic (PLEG): container finished" podID="dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" containerID="0b430da8b0b7f87d140a51b6b6f8674570a1d1fc69beb8d27fe5333716e57cc7" exitCode=0 Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.619195 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd","Type":"ContainerDied","Data":"0b430da8b0b7f87d140a51b6b6f8674570a1d1fc69beb8d27fe5333716e57cc7"} Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.634126 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"bbea8b85-5bb2-4570-83e7-07dafaade001","Type":"ContainerDied","Data":"eac80ddb3c1ad774281824ad7d736f1e5e935cbefe4754a2226078a580883179"} Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.634729 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.634738 4767 scope.go:117] "RemoveContainer" containerID="1dbdc25ff42287511a85e0c77517dcd153c4a98843b04855b2fd4416b5b4eeb7" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.696075 4767 scope.go:117] "RemoveContainer" containerID="d61967d9f7f7e59ac33e6585b330ce622d177128d25eda2f7e2b479e34bbd907" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.724130 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.748109 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.771424 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 18:55:12 crc kubenswrapper[4767]: E0128 18:55:12.772016 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbea8b85-5bb2-4570-83e7-07dafaade001" containerName="rabbitmq" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.772037 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbea8b85-5bb2-4570-83e7-07dafaade001" containerName="rabbitmq" Jan 28 18:55:12 crc kubenswrapper[4767]: E0128 18:55:12.772064 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bbea8b85-5bb2-4570-83e7-07dafaade001" containerName="setup-container" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.772071 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="bbea8b85-5bb2-4570-83e7-07dafaade001" containerName="setup-container" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.772302 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="bbea8b85-5bb2-4570-83e7-07dafaade001" containerName="rabbitmq" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.775004 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.779072 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.779263 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.779106 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.779593 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.779707 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.779788 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.780484 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-2tqzw" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.789999 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.824882 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bbea8b85-5bb2-4570-83e7-07dafaade001" path="/var/lib/kubelet/pods/bbea8b85-5bb2-4570-83e7-07dafaade001/volumes" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.891829 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.941369 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.941424 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4f9527b6-d3d4-484f-ac80-df76d1a21311-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.941469 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4f9527b6-d3d4-484f-ac80-df76d1a21311-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.941488 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4f9527b6-d3d4-484f-ac80-df76d1a21311-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.941521 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: 
\"kubernetes.io/downward-api/4f9527b6-d3d4-484f-ac80-df76d1a21311-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.941733 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4f9527b6-d3d4-484f-ac80-df76d1a21311-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.941828 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4f9527b6-d3d4-484f-ac80-df76d1a21311-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.942033 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4f9527b6-d3d4-484f-ac80-df76d1a21311-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.942222 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4f9527b6-d3d4-484f-ac80-df76d1a21311-config-data\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.942466 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4f9527b6-d3d4-484f-ac80-df76d1a21311-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:12 crc kubenswrapper[4767]: I0128 18:55:12.942522 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rsdb8\" (UniqueName: \"kubernetes.io/projected/4f9527b6-d3d4-484f-ac80-df76d1a21311-kube-api-access-rsdb8\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.044412 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-config-data\") pod \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.044561 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-plugins-conf\") pod \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.044614 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-erlang-cookie\") pod \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\" (UID: 
\"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.044677 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-confd\") pod \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.044704 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-tls\") pod \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.044772 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.044815 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-server-conf\") pod \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.044851 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlc8r\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-kube-api-access-vlc8r\") pod \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.044877 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-erlang-cookie-secret\") pod \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045008 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-plugins\") pod \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045060 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-pod-info\") pod \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\" (UID: \"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd\") " Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045463 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045496 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4f9527b6-d3d4-484f-ac80-df76d1a21311-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " 
pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045522 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4f9527b6-d3d4-484f-ac80-df76d1a21311-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045551 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4f9527b6-d3d4-484f-ac80-df76d1a21311-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045594 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4f9527b6-d3d4-484f-ac80-df76d1a21311-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045632 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4f9527b6-d3d4-484f-ac80-df76d1a21311-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045656 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4f9527b6-d3d4-484f-ac80-df76d1a21311-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045740 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4f9527b6-d3d4-484f-ac80-df76d1a21311-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045834 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4f9527b6-d3d4-484f-ac80-df76d1a21311-config-data\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045943 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4f9527b6-d3d4-484f-ac80-df76d1a21311-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.045976 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rsdb8\" (UniqueName: \"kubernetes.io/projected/4f9527b6-d3d4-484f-ac80-df76d1a21311-kube-api-access-rsdb8\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.054902 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/empty-dir/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" (UID: "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.055484 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" (UID: "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.056759 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/4f9527b6-d3d4-484f-ac80-df76d1a21311-server-conf\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.056984 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "persistence") pod "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" (UID: "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.057889 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" (UID: "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.058381 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/4f9527b6-d3d4-484f-ac80-df76d1a21311-config-data\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.058419 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/4f9527b6-d3d4-484f-ac80-df76d1a21311-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.058482 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-kube-api-access-vlc8r" (OuterVolumeSpecName: "kube-api-access-vlc8r") pod "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" (UID: "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd"). InnerVolumeSpecName "kube-api-access-vlc8r". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.059384 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.061125 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/4f9527b6-d3d4-484f-ac80-df76d1a21311-pod-info\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.062171 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/4f9527b6-d3d4-484f-ac80-df76d1a21311-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.062229 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/4f9527b6-d3d4-484f-ac80-df76d1a21311-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.063767 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-pod-info" (OuterVolumeSpecName: "pod-info") pod "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" (UID: "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.064408 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" (UID: "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.073987 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/4f9527b6-d3d4-484f-ac80-df76d1a21311-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.078046 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" (UID: "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.080882 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/4f9527b6-d3d4-484f-ac80-df76d1a21311-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.088571 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rsdb8\" (UniqueName: \"kubernetes.io/projected/4f9527b6-d3d4-484f-ac80-df76d1a21311-kube-api-access-rsdb8\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.099713 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/4f9527b6-d3d4-484f-ac80-df76d1a21311-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.117868 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-config-data" (OuterVolumeSpecName: "config-data") pod "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" (UID: "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.134734 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-server-0\" (UID: \"4f9527b6-d3d4-484f-ac80-df76d1a21311\") " pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.150766 4767 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.150817 4767 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.150833 4767 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.150865 4767 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.150879 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlc8r\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-kube-api-access-vlc8r\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.150891 4767 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:13 crc 
kubenswrapper[4767]: I0128 18:55:13.150902 4767 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.150913 4767 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-pod-info\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.150922 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.182933 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-server-conf" (OuterVolumeSpecName: "server-conf") pod "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" (UID: "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.186932 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.198299 4767 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.253368 4767 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.253420 4767 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-server-conf\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.297116 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" (UID: "dedf0cb1-05f8-47b2-b47b-b6de3d563cfd"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.355852 4767 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.648875 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"dedf0cb1-05f8-47b2-b47b-b6de3d563cfd","Type":"ContainerDied","Data":"d3b8598d2dbb8ad488a8d5ce6e54f8a171ee14c28da972e5bfd2b3ed853c1ac2"} Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.649478 4767 scope.go:117] "RemoveContainer" containerID="0b430da8b0b7f87d140a51b6b6f8674570a1d1fc69beb8d27fe5333716e57cc7" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.649670 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.686042 4767 scope.go:117] "RemoveContainer" containerID="3d481c8dcbb169378cc6a6e69a7028df854ea521a63e90c5e4e9eea8fbf1f230" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.723663 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.787500 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.825348 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 18:55:13 crc kubenswrapper[4767]: E0128 18:55:13.825989 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" containerName="rabbitmq" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.826015 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" containerName="rabbitmq" Jan 28 18:55:13 crc kubenswrapper[4767]: E0128 18:55:13.826057 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" containerName="setup-container" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.826067 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" containerName="setup-container" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.826351 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" containerName="rabbitmq" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.827783 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.837906 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.838590 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-h7ff2" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.838833 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.839143 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.839417 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.839538 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.839598 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.877021 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.900727 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.983758 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0663f989-bbb4-48a1-b4b8-3463a3a397a1-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.984233 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0663f989-bbb4-48a1-b4b8-3463a3a397a1-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.984411 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0663f989-bbb4-48a1-b4b8-3463a3a397a1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.984569 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0663f989-bbb4-48a1-b4b8-3463a3a397a1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.984664 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0663f989-bbb4-48a1-b4b8-3463a3a397a1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " 
pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.984787 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0663f989-bbb4-48a1-b4b8-3463a3a397a1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.984971 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0663f989-bbb4-48a1-b4b8-3463a3a397a1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.985127 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9n8qr\" (UniqueName: \"kubernetes.io/projected/0663f989-bbb4-48a1-b4b8-3463a3a397a1-kube-api-access-9n8qr\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.985316 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.985443 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0663f989-bbb4-48a1-b4b8-3463a3a397a1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:13 crc kubenswrapper[4767]: I0128 18:55:13.985585 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0663f989-bbb4-48a1-b4b8-3463a3a397a1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.088870 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0663f989-bbb4-48a1-b4b8-3463a3a397a1-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.088960 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0663f989-bbb4-48a1-b4b8-3463a3a397a1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.088996 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0663f989-bbb4-48a1-b4b8-3463a3a397a1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " 
pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.089032 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0663f989-bbb4-48a1-b4b8-3463a3a397a1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.089048 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0663f989-bbb4-48a1-b4b8-3463a3a397a1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.089135 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0663f989-bbb4-48a1-b4b8-3463a3a397a1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.089179 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9n8qr\" (UniqueName: \"kubernetes.io/projected/0663f989-bbb4-48a1-b4b8-3463a3a397a1-kube-api-access-9n8qr\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.089252 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.089312 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0663f989-bbb4-48a1-b4b8-3463a3a397a1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.089351 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0663f989-bbb4-48a1-b4b8-3463a3a397a1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.089433 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0663f989-bbb4-48a1-b4b8-3463a3a397a1-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.090387 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/0663f989-bbb4-48a1-b4b8-3463a3a397a1-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.090403 4767 operation_generator.go:580] "MountVolume.MountDevice 
succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.090824 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/0663f989-bbb4-48a1-b4b8-3463a3a397a1-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.091113 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/0663f989-bbb4-48a1-b4b8-3463a3a397a1-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.091581 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/0663f989-bbb4-48a1-b4b8-3463a3a397a1-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.100335 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/0663f989-bbb4-48a1-b4b8-3463a3a397a1-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.101673 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/0663f989-bbb4-48a1-b4b8-3463a3a397a1-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.103277 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/0663f989-bbb4-48a1-b4b8-3463a3a397a1-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.107462 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-j4lr2"] Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.111111 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/0663f989-bbb4-48a1-b4b8-3463a3a397a1-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.112595 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/0663f989-bbb4-48a1-b4b8-3463a3a397a1-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.113331 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.118190 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.187770 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-j4lr2"] Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.223776 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-ovsdbserver-sb\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.223893 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-szmgm\" (UniqueName: \"kubernetes.io/projected/9ed993ae-60f3-43c2-9218-14b86b9f6c91-kube-api-access-szmgm\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.223987 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-openstack-edpm-ipam\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.224026 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-ovsdbserver-nb\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.224222 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-dns-swift-storage-0\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.224469 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-dns-svc\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.224587 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-config\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.225184 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9n8qr\" (UniqueName: \"kubernetes.io/projected/0663f989-bbb4-48a1-b4b8-3463a3a397a1-kube-api-access-9n8qr\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.240950 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"0663f989-bbb4-48a1-b4b8-3463a3a397a1\") " pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.327102 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-dns-svc\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.327180 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-config\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.327330 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-ovsdbserver-sb\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.327357 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-szmgm\" (UniqueName: \"kubernetes.io/projected/9ed993ae-60f3-43c2-9218-14b86b9f6c91-kube-api-access-szmgm\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.327394 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-openstack-edpm-ipam\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.327411 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-ovsdbserver-nb\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.327466 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-dns-swift-storage-0\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.328503 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-dns-swift-storage-0\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " 
pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.329118 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-dns-svc\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.330192 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-config\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.330613 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-ovsdbserver-nb\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.330679 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-ovsdbserver-sb\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.331335 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-openstack-edpm-ipam\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.353188 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-szmgm\" (UniqueName: \"kubernetes.io/projected/9ed993ae-60f3-43c2-9218-14b86b9f6c91-kube-api-access-szmgm\") pod \"dnsmasq-dns-5b75489c6f-j4lr2\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.472085 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.494068 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.685469 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4f9527b6-d3d4-484f-ac80-df76d1a21311","Type":"ContainerStarted","Data":"7f985abc4a9e5f90f3eb4d27a8631a504296d84a2277b06a307da96689debcd3"} Jan 28 18:55:14 crc kubenswrapper[4767]: I0128 18:55:14.824150 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dedf0cb1-05f8-47b2-b47b-b6de3d563cfd" path="/var/lib/kubelet/pods/dedf0cb1-05f8-47b2-b47b-b6de3d563cfd/volumes" Jan 28 18:55:15 crc kubenswrapper[4767]: I0128 18:55:15.111914 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 28 18:55:15 crc kubenswrapper[4767]: W0128 18:55:15.112530 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0663f989_bbb4_48a1_b4b8_3463a3a397a1.slice/crio-298b220d13d19751c0eadf7fa5cb84320278c30d9de1f59062eb778434dd201a WatchSource:0}: Error finding container 298b220d13d19751c0eadf7fa5cb84320278c30d9de1f59062eb778434dd201a: Status 404 returned error can't find the container with id 298b220d13d19751c0eadf7fa5cb84320278c30d9de1f59062eb778434dd201a Jan 28 18:55:15 crc kubenswrapper[4767]: I0128 18:55:15.215237 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-j4lr2"] Jan 28 18:55:15 crc kubenswrapper[4767]: I0128 18:55:15.705106 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" event={"ID":"9ed993ae-60f3-43c2-9218-14b86b9f6c91","Type":"ContainerStarted","Data":"d7fcc4d7d62a6c58453db89ed278415030fa85eae6aefda36b86a3b72fa75128"} Jan 28 18:55:15 crc kubenswrapper[4767]: I0128 18:55:15.708328 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0663f989-bbb4-48a1-b4b8-3463a3a397a1","Type":"ContainerStarted","Data":"298b220d13d19751c0eadf7fa5cb84320278c30d9de1f59062eb778434dd201a"} Jan 28 18:55:16 crc kubenswrapper[4767]: I0128 18:55:16.732693 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4f9527b6-d3d4-484f-ac80-df76d1a21311","Type":"ContainerStarted","Data":"8ff7a985232e280433aaeb5c24c7f1b9d09d8b9a8518697370d63bf274399878"} Jan 28 18:55:16 crc kubenswrapper[4767]: I0128 18:55:16.739538 4767 generic.go:334] "Generic (PLEG): container finished" podID="9ed993ae-60f3-43c2-9218-14b86b9f6c91" containerID="611a3f7e5d2e39aef8e3b1069a7f6df0e67add5333a6fde3b313c6c82b281ce0" exitCode=0 Jan 28 18:55:16 crc kubenswrapper[4767]: I0128 18:55:16.739606 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" event={"ID":"9ed993ae-60f3-43c2-9218-14b86b9f6c91","Type":"ContainerDied","Data":"611a3f7e5d2e39aef8e3b1069a7f6df0e67add5333a6fde3b313c6c82b281ce0"} Jan 28 18:55:17 crc kubenswrapper[4767]: I0128 18:55:17.755941 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" event={"ID":"9ed993ae-60f3-43c2-9218-14b86b9f6c91","Type":"ContainerStarted","Data":"0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2"} Jan 28 18:55:17 crc kubenswrapper[4767]: I0128 18:55:17.756136 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:17 crc kubenswrapper[4767]: I0128 18:55:17.759466 4767 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0663f989-bbb4-48a1-b4b8-3463a3a397a1","Type":"ContainerStarted","Data":"4040df8479e2a2322435ec69c153c8cdf43992fee7cd34f5e1711e3645c7a602"} Jan 28 18:55:17 crc kubenswrapper[4767]: I0128 18:55:17.781590 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" podStartSLOduration=3.781562988 podStartE2EDuration="3.781562988s" podCreationTimestamp="2026-01-28 18:55:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:55:17.778473551 +0000 UTC m=+1523.742656455" watchObservedRunningTime="2026-01-28 18:55:17.781562988 +0000 UTC m=+1523.745745862" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.495428 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.590565 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-bbjv2"] Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.590865 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" podUID="3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" containerName="dnsmasq-dns" containerID="cri-o://faaabe6fdad6ea7947adb493a2dba4d21a6361a315a5cef9a9eb3d3dcaf75f0c" gracePeriod=10 Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.808625 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d75f767dc-pjf4k"] Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.811080 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.833157 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d75f767dc-pjf4k"] Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.856017 4767 generic.go:334] "Generic (PLEG): container finished" podID="3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" containerID="faaabe6fdad6ea7947adb493a2dba4d21a6361a315a5cef9a9eb3d3dcaf75f0c" exitCode=0 Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.856083 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" event={"ID":"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a","Type":"ContainerDied","Data":"faaabe6fdad6ea7947adb493a2dba4d21a6361a315a5cef9a9eb3d3dcaf75f0c"} Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.894778 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-config\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.894869 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-ovsdbserver-sb\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.894919 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-dns-svc\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.894968 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-dns-swift-storage-0\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.895000 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-628sk\" (UniqueName: \"kubernetes.io/projected/854fd133-dba5-4457-9017-099a3eacd827-kube-api-access-628sk\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.895054 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-openstack-edpm-ipam\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.895108 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-ovsdbserver-nb\") pod 
\"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.997718 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-dns-swift-storage-0\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.997820 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-628sk\" (UniqueName: \"kubernetes.io/projected/854fd133-dba5-4457-9017-099a3eacd827-kube-api-access-628sk\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.997881 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-openstack-edpm-ipam\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.997957 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-ovsdbserver-nb\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.998089 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-config\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.998112 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-ovsdbserver-sb\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.998142 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-dns-svc\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.999450 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-dns-svc\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:24 crc kubenswrapper[4767]: I0128 18:55:24.999741 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-ovsdbserver-nb\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: 
\"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:25 crc kubenswrapper[4767]: I0128 18:55:25.000077 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-dns-swift-storage-0\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:25 crc kubenswrapper[4767]: I0128 18:55:25.000278 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-config\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:25 crc kubenswrapper[4767]: I0128 18:55:25.000630 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-ovsdbserver-sb\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:25 crc kubenswrapper[4767]: I0128 18:55:25.000974 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/854fd133-dba5-4457-9017-099a3eacd827-openstack-edpm-ipam\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:25 crc kubenswrapper[4767]: I0128 18:55:25.024840 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-628sk\" (UniqueName: \"kubernetes.io/projected/854fd133-dba5-4457-9017-099a3eacd827-kube-api-access-628sk\") pod \"dnsmasq-dns-5d75f767dc-pjf4k\" (UID: \"854fd133-dba5-4457-9017-099a3eacd827\") " pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:25 crc kubenswrapper[4767]: I0128 18:55:25.154197 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:25 crc kubenswrapper[4767]: I0128 18:55:25.730503 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d75f767dc-pjf4k"] Jan 28 18:55:25 crc kubenswrapper[4767]: I0128 18:55:25.836584 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 28 18:55:25 crc kubenswrapper[4767]: I0128 18:55:25.890218 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" event={"ID":"854fd133-dba5-4457-9017-099a3eacd827","Type":"ContainerStarted","Data":"868c09f5451ffa74c27d1804af4fb6b50539fc3d3a08b7a546acb817db49ee77"} Jan 28 18:55:25 crc kubenswrapper[4767]: I0128 18:55:25.923668 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" event={"ID":"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a","Type":"ContainerDied","Data":"5a7745195e634cac4777a6b6bb2c3629100d57b41551479f42aea4cce6206208"} Jan 28 18:55:25 crc kubenswrapper[4767]: I0128 18:55:25.923721 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a7745195e634cac4777a6b6bb2c3629100d57b41551479f42aea4cce6206208" Jan 28 18:55:25 crc kubenswrapper[4767]: I0128 18:55:25.980791 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.027118 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-config\") pod \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.027238 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-dns-svc\") pod \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.027448 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkzzs\" (UniqueName: \"kubernetes.io/projected/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-kube-api-access-hkzzs\") pod \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.027491 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-ovsdbserver-nb\") pod \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.027558 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-dns-swift-storage-0\") pod \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.027833 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-ovsdbserver-sb\") pod \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\" (UID: \"3ba5c851-cdd7-40f6-b48e-885ca6a6c95a\") " Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.054374 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-kube-api-access-hkzzs" (OuterVolumeSpecName: "kube-api-access-hkzzs") pod "3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" (UID: "3ba5c851-cdd7-40f6-b48e-885ca6a6c95a"). InnerVolumeSpecName "kube-api-access-hkzzs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.131819 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkzzs\" (UniqueName: \"kubernetes.io/projected/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-kube-api-access-hkzzs\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.134182 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" (UID: "3ba5c851-cdd7-40f6-b48e-885ca6a6c95a"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.143507 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" (UID: "3ba5c851-cdd7-40f6-b48e-885ca6a6c95a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.167889 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-config" (OuterVolumeSpecName: "config") pod "3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" (UID: "3ba5c851-cdd7-40f6-b48e-885ca6a6c95a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.182172 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" (UID: "3ba5c851-cdd7-40f6-b48e-885ca6a6c95a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.194195 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" (UID: "3ba5c851-cdd7-40f6-b48e-885ca6a6c95a"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.233886 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.233934 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.233946 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.233957 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.233969 4767 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.941947 4767 generic.go:334] "Generic (PLEG): container finished" podID="854fd133-dba5-4457-9017-099a3eacd827" containerID="35e99f93d60a7c759a408b224e449f88191f472ba8f45d8c7293ed32c1d327e4" exitCode=0 Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.942010 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" 
event={"ID":"854fd133-dba5-4457-9017-099a3eacd827","Type":"ContainerDied","Data":"35e99f93d60a7c759a408b224e449f88191f472ba8f45d8c7293ed32c1d327e4"} Jan 28 18:55:26 crc kubenswrapper[4767]: I0128 18:55:26.942516 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-f84f9ccf-bbjv2" Jan 28 18:55:27 crc kubenswrapper[4767]: I0128 18:55:27.003475 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-bbjv2"] Jan 28 18:55:27 crc kubenswrapper[4767]: I0128 18:55:27.013732 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-f84f9ccf-bbjv2"] Jan 28 18:55:27 crc kubenswrapper[4767]: I0128 18:55:27.955990 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" event={"ID":"854fd133-dba5-4457-9017-099a3eacd827","Type":"ContainerStarted","Data":"fff094f0f4339da4fd26eed0fe1acb323237f2ff389fbf8249329d596e441403"} Jan 28 18:55:27 crc kubenswrapper[4767]: I0128 18:55:27.956669 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:27 crc kubenswrapper[4767]: I0128 18:55:27.985343 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" podStartSLOduration=3.985317809 podStartE2EDuration="3.985317809s" podCreationTimestamp="2026-01-28 18:55:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:55:27.979570178 +0000 UTC m=+1533.943753062" watchObservedRunningTime="2026-01-28 18:55:27.985317809 +0000 UTC m=+1533.949500683" Jan 28 18:55:28 crc kubenswrapper[4767]: I0128 18:55:28.807476 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" path="/var/lib/kubelet/pods/3ba5c851-cdd7-40f6-b48e-885ca6a6c95a/volumes" Jan 28 18:55:35 crc kubenswrapper[4767]: I0128 18:55:35.156425 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d75f767dc-pjf4k" Jan 28 18:55:35 crc kubenswrapper[4767]: I0128 18:55:35.223797 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-j4lr2"] Jan 28 18:55:35 crc kubenswrapper[4767]: I0128 18:55:35.224086 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" podUID="9ed993ae-60f3-43c2-9218-14b86b9f6c91" containerName="dnsmasq-dns" containerID="cri-o://0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2" gracePeriod=10 Jan 28 18:55:35 crc kubenswrapper[4767]: I0128 18:55:35.917089 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.063674 4767 generic.go:334] "Generic (PLEG): container finished" podID="9ed993ae-60f3-43c2-9218-14b86b9f6c91" containerID="0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2" exitCode=0 Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.063779 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.063797 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" event={"ID":"9ed993ae-60f3-43c2-9218-14b86b9f6c91","Type":"ContainerDied","Data":"0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2"} Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.064443 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b75489c6f-j4lr2" event={"ID":"9ed993ae-60f3-43c2-9218-14b86b9f6c91","Type":"ContainerDied","Data":"d7fcc4d7d62a6c58453db89ed278415030fa85eae6aefda36b86a3b72fa75128"} Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.064500 4767 scope.go:117] "RemoveContainer" containerID="0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.066263 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-dns-swift-storage-0\") pod \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.066402 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-ovsdbserver-nb\") pod \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.066568 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-ovsdbserver-sb\") pod \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.066800 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-szmgm\" (UniqueName: \"kubernetes.io/projected/9ed993ae-60f3-43c2-9218-14b86b9f6c91-kube-api-access-szmgm\") pod \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.067015 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-openstack-edpm-ipam\") pod \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.068452 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-dns-svc\") pod \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.068632 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-config\") pod \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\" (UID: \"9ed993ae-60f3-43c2-9218-14b86b9f6c91\") " Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.074424 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/9ed993ae-60f3-43c2-9218-14b86b9f6c91-kube-api-access-szmgm" (OuterVolumeSpecName: "kube-api-access-szmgm") pod "9ed993ae-60f3-43c2-9218-14b86b9f6c91" (UID: "9ed993ae-60f3-43c2-9218-14b86b9f6c91"). InnerVolumeSpecName "kube-api-access-szmgm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.128924 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9ed993ae-60f3-43c2-9218-14b86b9f6c91" (UID: "9ed993ae-60f3-43c2-9218-14b86b9f6c91"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.130306 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-config" (OuterVolumeSpecName: "config") pod "9ed993ae-60f3-43c2-9218-14b86b9f6c91" (UID: "9ed993ae-60f3-43c2-9218-14b86b9f6c91"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.131013 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "9ed993ae-60f3-43c2-9218-14b86b9f6c91" (UID: "9ed993ae-60f3-43c2-9218-14b86b9f6c91"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.131564 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9ed993ae-60f3-43c2-9218-14b86b9f6c91" (UID: "9ed993ae-60f3-43c2-9218-14b86b9f6c91"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.172768 4767 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.172805 4767 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.172819 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-config\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.172832 4767 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.172844 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-szmgm\" (UniqueName: \"kubernetes.io/projected/9ed993ae-60f3-43c2-9218-14b86b9f6c91-kube-api-access-szmgm\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.174791 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9ed993ae-60f3-43c2-9218-14b86b9f6c91" (UID: "9ed993ae-60f3-43c2-9218-14b86b9f6c91"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.188162 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9ed993ae-60f3-43c2-9218-14b86b9f6c91" (UID: "9ed993ae-60f3-43c2-9218-14b86b9f6c91"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.218283 4767 scope.go:117] "RemoveContainer" containerID="611a3f7e5d2e39aef8e3b1069a7f6df0e67add5333a6fde3b313c6c82b281ce0" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.246798 4767 scope.go:117] "RemoveContainer" containerID="0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2" Jan 28 18:55:36 crc kubenswrapper[4767]: E0128 18:55:36.248102 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2\": container with ID starting with 0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2 not found: ID does not exist" containerID="0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.248156 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2"} err="failed to get container status \"0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2\": rpc error: code = NotFound desc = could not find container \"0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2\": container with ID starting with 0db4e93b6e6b41d2aa29b1fe35493fa689b8ed39a28ceb31b548ced4a43617b2 not found: ID does not exist" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.248187 4767 scope.go:117] "RemoveContainer" containerID="611a3f7e5d2e39aef8e3b1069a7f6df0e67add5333a6fde3b313c6c82b281ce0" Jan 28 18:55:36 crc kubenswrapper[4767]: E0128 18:55:36.248567 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"611a3f7e5d2e39aef8e3b1069a7f6df0e67add5333a6fde3b313c6c82b281ce0\": container with ID starting with 611a3f7e5d2e39aef8e3b1069a7f6df0e67add5333a6fde3b313c6c82b281ce0 not found: ID does not exist" containerID="611a3f7e5d2e39aef8e3b1069a7f6df0e67add5333a6fde3b313c6c82b281ce0" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.248627 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"611a3f7e5d2e39aef8e3b1069a7f6df0e67add5333a6fde3b313c6c82b281ce0"} err="failed to get container status \"611a3f7e5d2e39aef8e3b1069a7f6df0e67add5333a6fde3b313c6c82b281ce0\": rpc error: code = NotFound desc = could not find container \"611a3f7e5d2e39aef8e3b1069a7f6df0e67add5333a6fde3b313c6c82b281ce0\": container with ID starting with 611a3f7e5d2e39aef8e3b1069a7f6df0e67add5333a6fde3b313c6c82b281ce0 not found: ID does not exist" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.275015 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.275058 4767 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9ed993ae-60f3-43c2-9218-14b86b9f6c91-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.483276 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b75489c6f-j4lr2"] Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.564442 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/dnsmasq-dns-5b75489c6f-j4lr2"] Jan 28 18:55:36 crc kubenswrapper[4767]: I0128 18:55:36.808095 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ed993ae-60f3-43c2-9218-14b86b9f6c91" path="/var/lib/kubelet/pods/9ed993ae-60f3-43c2-9218-14b86b9f6c91/volumes" Jan 28 18:55:45 crc kubenswrapper[4767]: I0128 18:55:45.456067 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:55:45 crc kubenswrapper[4767]: I0128 18:55:45.456880 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.206683 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf"] Jan 28 18:55:48 crc kubenswrapper[4767]: E0128 18:55:48.210045 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ed993ae-60f3-43c2-9218-14b86b9f6c91" containerName="dnsmasq-dns" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.210070 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ed993ae-60f3-43c2-9218-14b86b9f6c91" containerName="dnsmasq-dns" Jan 28 18:55:48 crc kubenswrapper[4767]: E0128 18:55:48.210087 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" containerName="init" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.210097 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" containerName="init" Jan 28 18:55:48 crc kubenswrapper[4767]: E0128 18:55:48.210123 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" containerName="dnsmasq-dns" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.210132 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" containerName="dnsmasq-dns" Jan 28 18:55:48 crc kubenswrapper[4767]: E0128 18:55:48.210166 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ed993ae-60f3-43c2-9218-14b86b9f6c91" containerName="init" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.210173 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ed993ae-60f3-43c2-9218-14b86b9f6c91" containerName="init" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.210450 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ed993ae-60f3-43c2-9218-14b86b9f6c91" containerName="dnsmasq-dns" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.210486 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ba5c851-cdd7-40f6-b48e-885ca6a6c95a" containerName="dnsmasq-dns" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.211445 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.221536 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.221539 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.221616 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.221539 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.229462 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf"] Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.374523 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.374584 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8gfrz\" (UniqueName: \"kubernetes.io/projected/e1670ff6-3b80-40cc-99e0-496beccc5afc-kube-api-access-8gfrz\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.374644 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.374996 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.477289 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.477437 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.477602 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.477634 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8gfrz\" (UniqueName: \"kubernetes.io/projected/e1670ff6-3b80-40cc-99e0-496beccc5afc-kube-api-access-8gfrz\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.486590 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-ssh-key-openstack-edpm-ipam\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.490278 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.493162 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.498129 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8gfrz\" (UniqueName: \"kubernetes.io/projected/e1670ff6-3b80-40cc-99e0-496beccc5afc-kube-api-access-8gfrz\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:48 crc kubenswrapper[4767]: I0128 18:55:48.538550 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:55:49 crc kubenswrapper[4767]: I0128 18:55:49.167223 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf"] Jan 28 18:55:49 crc kubenswrapper[4767]: I0128 18:55:49.218381 4767 generic.go:334] "Generic (PLEG): container finished" podID="4f9527b6-d3d4-484f-ac80-df76d1a21311" containerID="8ff7a985232e280433aaeb5c24c7f1b9d09d8b9a8518697370d63bf274399878" exitCode=0 Jan 28 18:55:49 crc kubenswrapper[4767]: I0128 18:55:49.218463 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4f9527b6-d3d4-484f-ac80-df76d1a21311","Type":"ContainerDied","Data":"8ff7a985232e280433aaeb5c24c7f1b9d09d8b9a8518697370d63bf274399878"} Jan 28 18:55:49 crc kubenswrapper[4767]: I0128 18:55:49.223777 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" event={"ID":"e1670ff6-3b80-40cc-99e0-496beccc5afc","Type":"ContainerStarted","Data":"5425412fae9afb30410152b7b5efd6f7f6316819f987581456329a8ea60d617d"} Jan 28 18:55:49 crc kubenswrapper[4767]: I0128 18:55:49.729930 4767 scope.go:117] "RemoveContainer" containerID="08ffc0282e64807b1090c35db34c3f34f2a02e18df344f0201d7ea0034004795" Jan 28 18:55:49 crc kubenswrapper[4767]: I0128 18:55:49.763343 4767 scope.go:117] "RemoveContainer" containerID="389f29fcfb4135d774ea8cab63fb8bfb5c6165befbe491f4aa19dae103cdc380" Jan 28 18:55:50 crc kubenswrapper[4767]: I0128 18:55:50.249380 4767 generic.go:334] "Generic (PLEG): container finished" podID="0663f989-bbb4-48a1-b4b8-3463a3a397a1" containerID="4040df8479e2a2322435ec69c153c8cdf43992fee7cd34f5e1711e3645c7a602" exitCode=0 Jan 28 18:55:50 crc kubenswrapper[4767]: I0128 18:55:50.249479 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0663f989-bbb4-48a1-b4b8-3463a3a397a1","Type":"ContainerDied","Data":"4040df8479e2a2322435ec69c153c8cdf43992fee7cd34f5e1711e3645c7a602"} Jan 28 18:55:50 crc kubenswrapper[4767]: I0128 18:55:50.267416 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"4f9527b6-d3d4-484f-ac80-df76d1a21311","Type":"ContainerStarted","Data":"045c8d37f3d2cc692bba3cc0afd4a0903da274746455af1e4022a609ae157ec4"} Jan 28 18:55:50 crc kubenswrapper[4767]: I0128 18:55:50.267804 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 28 18:55:50 crc kubenswrapper[4767]: I0128 18:55:50.328318 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=38.328293341 podStartE2EDuration="38.328293341s" podCreationTimestamp="2026-01-28 18:55:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:55:50.318490603 +0000 UTC m=+1556.282673477" watchObservedRunningTime="2026-01-28 18:55:50.328293341 +0000 UTC m=+1556.292476215" Jan 28 18:55:51 crc kubenswrapper[4767]: I0128 18:55:51.281491 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"0663f989-bbb4-48a1-b4b8-3463a3a397a1","Type":"ContainerStarted","Data":"a2082f86531528730e531c3f408823d5f166317da1658647e995cc50bffec7db"} Jan 28 18:55:51 crc kubenswrapper[4767]: I0128 18:55:51.282431 4767 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:55:51 crc kubenswrapper[4767]: I0128 18:55:51.321473 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=38.321438598 podStartE2EDuration="38.321438598s" podCreationTimestamp="2026-01-28 18:55:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 18:55:51.3037027 +0000 UTC m=+1557.267885594" watchObservedRunningTime="2026-01-28 18:55:51.321438598 +0000 UTC m=+1557.285621482" Jan 28 18:56:01 crc kubenswrapper[4767]: I0128 18:56:01.292043 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 18:56:02 crc kubenswrapper[4767]: I0128 18:56:02.439638 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" event={"ID":"e1670ff6-3b80-40cc-99e0-496beccc5afc","Type":"ContainerStarted","Data":"0afd5c12680dfe6e82dd31736e39c5c19fc9dbb23d0695d24394009bec7779dc"} Jan 28 18:56:02 crc kubenswrapper[4767]: I0128 18:56:02.470761 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" podStartSLOduration=2.3707401 podStartE2EDuration="14.470734126s" podCreationTimestamp="2026-01-28 18:55:48 +0000 UTC" firstStartedPulling="2026-01-28 18:55:49.187528989 +0000 UTC m=+1555.151711863" lastFinishedPulling="2026-01-28 18:56:01.287523015 +0000 UTC m=+1567.251705889" observedRunningTime="2026-01-28 18:56:02.462146755 +0000 UTC m=+1568.426329639" watchObservedRunningTime="2026-01-28 18:56:02.470734126 +0000 UTC m=+1568.434917000" Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.136547 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-7rt8q"] Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.143830 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.170939 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7rt8q"] Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.208159 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="4f9527b6-d3d4-484f-ac80-df76d1a21311" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.221:5671: connect: connection refused" Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.210730 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fe44196-e6de-4821-ac4c-f1f9999438bc-utilities\") pod \"community-operators-7rt8q\" (UID: \"9fe44196-e6de-4821-ac4c-f1f9999438bc\") " pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.214920 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fe44196-e6de-4821-ac4c-f1f9999438bc-catalog-content\") pod \"community-operators-7rt8q\" (UID: \"9fe44196-e6de-4821-ac4c-f1f9999438bc\") " pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.215419 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lg8c5\" (UniqueName: \"kubernetes.io/projected/9fe44196-e6de-4821-ac4c-f1f9999438bc-kube-api-access-lg8c5\") pod \"community-operators-7rt8q\" (UID: \"9fe44196-e6de-4821-ac4c-f1f9999438bc\") " pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.318567 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lg8c5\" (UniqueName: \"kubernetes.io/projected/9fe44196-e6de-4821-ac4c-f1f9999438bc-kube-api-access-lg8c5\") pod \"community-operators-7rt8q\" (UID: \"9fe44196-e6de-4821-ac4c-f1f9999438bc\") " pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.319026 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fe44196-e6de-4821-ac4c-f1f9999438bc-utilities\") pod \"community-operators-7rt8q\" (UID: \"9fe44196-e6de-4821-ac4c-f1f9999438bc\") " pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.319118 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fe44196-e6de-4821-ac4c-f1f9999438bc-catalog-content\") pod \"community-operators-7rt8q\" (UID: \"9fe44196-e6de-4821-ac4c-f1f9999438bc\") " pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.319769 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fe44196-e6de-4821-ac4c-f1f9999438bc-utilities\") pod \"community-operators-7rt8q\" (UID: \"9fe44196-e6de-4821-ac4c-f1f9999438bc\") " pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.319800 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/9fe44196-e6de-4821-ac4c-f1f9999438bc-catalog-content\") pod \"community-operators-7rt8q\" (UID: \"9fe44196-e6de-4821-ac4c-f1f9999438bc\") " pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.345393 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lg8c5\" (UniqueName: \"kubernetes.io/projected/9fe44196-e6de-4821-ac4c-f1f9999438bc-kube-api-access-lg8c5\") pod \"community-operators-7rt8q\" (UID: \"9fe44196-e6de-4821-ac4c-f1f9999438bc\") " pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:03 crc kubenswrapper[4767]: I0128 18:56:03.471646 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:04 crc kubenswrapper[4767]: I0128 18:56:04.062605 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-7rt8q"] Jan 28 18:56:04 crc kubenswrapper[4767]: I0128 18:56:04.464912 4767 generic.go:334] "Generic (PLEG): container finished" podID="9fe44196-e6de-4821-ac4c-f1f9999438bc" containerID="e342ca528c0b70d277c7b11e8c983fc205a88956fd43511d2b34fe712af317a3" exitCode=0 Jan 28 18:56:04 crc kubenswrapper[4767]: I0128 18:56:04.465024 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rt8q" event={"ID":"9fe44196-e6de-4821-ac4c-f1f9999438bc","Type":"ContainerDied","Data":"e342ca528c0b70d277c7b11e8c983fc205a88956fd43511d2b34fe712af317a3"} Jan 28 18:56:04 crc kubenswrapper[4767]: I0128 18:56:04.465409 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rt8q" event={"ID":"9fe44196-e6de-4821-ac4c-f1f9999438bc","Type":"ContainerStarted","Data":"6ddf7968159d33cbe9f323ca15bcbcf91423b75d0ca55df5d436d6320d971013"} Jan 28 18:56:04 crc kubenswrapper[4767]: I0128 18:56:04.477564 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="0663f989-bbb4-48a1-b4b8-3463a3a397a1" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.222:5671: connect: connection refused" Jan 28 18:56:06 crc kubenswrapper[4767]: I0128 18:56:06.508136 4767 generic.go:334] "Generic (PLEG): container finished" podID="9fe44196-e6de-4821-ac4c-f1f9999438bc" containerID="9b32d70e8010921db59af46bd0c8084dbddb03c5ef29b7f40f7867d0ccc221c2" exitCode=0 Jan 28 18:56:06 crc kubenswrapper[4767]: I0128 18:56:06.508266 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rt8q" event={"ID":"9fe44196-e6de-4821-ac4c-f1f9999438bc","Type":"ContainerDied","Data":"9b32d70e8010921db59af46bd0c8084dbddb03c5ef29b7f40f7867d0ccc221c2"} Jan 28 18:56:07 crc kubenswrapper[4767]: I0128 18:56:07.521163 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rt8q" event={"ID":"9fe44196-e6de-4821-ac4c-f1f9999438bc","Type":"ContainerStarted","Data":"05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae"} Jan 28 18:56:07 crc kubenswrapper[4767]: I0128 18:56:07.552022 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-7rt8q" podStartSLOduration=2.045326513 podStartE2EDuration="4.551994596s" podCreationTimestamp="2026-01-28 18:56:03 +0000 UTC" firstStartedPulling="2026-01-28 18:56:04.467684057 +0000 UTC m=+1570.431866931" lastFinishedPulling="2026-01-28 18:56:06.97435214 +0000 
UTC m=+1572.938535014" observedRunningTime="2026-01-28 18:56:07.548156215 +0000 UTC m=+1573.512339119" watchObservedRunningTime="2026-01-28 18:56:07.551994596 +0000 UTC m=+1573.516177470" Jan 28 18:56:12 crc kubenswrapper[4767]: I0128 18:56:12.600014 4767 generic.go:334] "Generic (PLEG): container finished" podID="e1670ff6-3b80-40cc-99e0-496beccc5afc" containerID="0afd5c12680dfe6e82dd31736e39c5c19fc9dbb23d0695d24394009bec7779dc" exitCode=0 Jan 28 18:56:12 crc kubenswrapper[4767]: I0128 18:56:12.600096 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" event={"ID":"e1670ff6-3b80-40cc-99e0-496beccc5afc","Type":"ContainerDied","Data":"0afd5c12680dfe6e82dd31736e39c5c19fc9dbb23d0695d24394009bec7779dc"} Jan 28 18:56:13 crc kubenswrapper[4767]: I0128 18:56:13.189252 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 28 18:56:13 crc kubenswrapper[4767]: I0128 18:56:13.472807 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:13 crc kubenswrapper[4767]: I0128 18:56:13.472915 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:13 crc kubenswrapper[4767]: I0128 18:56:13.539125 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:13 crc kubenswrapper[4767]: I0128 18:56:13.679904 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:13 crc kubenswrapper[4767]: I0128 18:56:13.794044 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7rt8q"] Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.165756 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.281094 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-ssh-key-openstack-edpm-ipam\") pod \"e1670ff6-3b80-40cc-99e0-496beccc5afc\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.281155 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-repo-setup-combined-ca-bundle\") pod \"e1670ff6-3b80-40cc-99e0-496beccc5afc\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.281763 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8gfrz\" (UniqueName: \"kubernetes.io/projected/e1670ff6-3b80-40cc-99e0-496beccc5afc-kube-api-access-8gfrz\") pod \"e1670ff6-3b80-40cc-99e0-496beccc5afc\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.281821 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-inventory\") pod \"e1670ff6-3b80-40cc-99e0-496beccc5afc\" (UID: \"e1670ff6-3b80-40cc-99e0-496beccc5afc\") " Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.290565 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1670ff6-3b80-40cc-99e0-496beccc5afc-kube-api-access-8gfrz" (OuterVolumeSpecName: "kube-api-access-8gfrz") pod "e1670ff6-3b80-40cc-99e0-496beccc5afc" (UID: "e1670ff6-3b80-40cc-99e0-496beccc5afc"). InnerVolumeSpecName "kube-api-access-8gfrz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.290443 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "e1670ff6-3b80-40cc-99e0-496beccc5afc" (UID: "e1670ff6-3b80-40cc-99e0-496beccc5afc"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.324720 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e1670ff6-3b80-40cc-99e0-496beccc5afc" (UID: "e1670ff6-3b80-40cc-99e0-496beccc5afc"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.329375 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-inventory" (OuterVolumeSpecName: "inventory") pod "e1670ff6-3b80-40cc-99e0-496beccc5afc" (UID: "e1670ff6-3b80-40cc-99e0-496beccc5afc"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.385014 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8gfrz\" (UniqueName: \"kubernetes.io/projected/e1670ff6-3b80-40cc-99e0-496beccc5afc-kube-api-access-8gfrz\") on node \"crc\" DevicePath \"\"" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.385073 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.385088 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.385105 4767 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e1670ff6-3b80-40cc-99e0-496beccc5afc-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.476519 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.650744 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.650983 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf" event={"ID":"e1670ff6-3b80-40cc-99e0-496beccc5afc","Type":"ContainerDied","Data":"5425412fae9afb30410152b7b5efd6f7f6316819f987581456329a8ea60d617d"} Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.651356 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5425412fae9afb30410152b7b5efd6f7f6316819f987581456329a8ea60d617d" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.729181 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46"] Jan 28 18:56:14 crc kubenswrapper[4767]: E0128 18:56:14.730001 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1670ff6-3b80-40cc-99e0-496beccc5afc" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.730026 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1670ff6-3b80-40cc-99e0-496beccc5afc" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.730338 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1670ff6-3b80-40cc-99e0-496beccc5afc" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.731431 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.749507 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.749781 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.749958 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.750100 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.754412 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46"] Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.794316 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rtp7t\" (UniqueName: \"kubernetes.io/projected/562ce29b-990d-4805-9a26-26dbd95185ed-kube-api-access-rtp7t\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fnn46\" (UID: \"562ce29b-990d-4805-9a26-26dbd95185ed\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.794399 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/562ce29b-990d-4805-9a26-26dbd95185ed-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fnn46\" (UID: \"562ce29b-990d-4805-9a26-26dbd95185ed\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.794464 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/562ce29b-990d-4805-9a26-26dbd95185ed-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fnn46\" (UID: \"562ce29b-990d-4805-9a26-26dbd95185ed\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.895908 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/562ce29b-990d-4805-9a26-26dbd95185ed-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fnn46\" (UID: \"562ce29b-990d-4805-9a26-26dbd95185ed\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.896197 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rtp7t\" (UniqueName: \"kubernetes.io/projected/562ce29b-990d-4805-9a26-26dbd95185ed-kube-api-access-rtp7t\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fnn46\" (UID: \"562ce29b-990d-4805-9a26-26dbd95185ed\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.896300 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/562ce29b-990d-4805-9a26-26dbd95185ed-ssh-key-openstack-edpm-ipam\") pod 
\"redhat-edpm-deployment-openstack-edpm-ipam-fnn46\" (UID: \"562ce29b-990d-4805-9a26-26dbd95185ed\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.919225 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/562ce29b-990d-4805-9a26-26dbd95185ed-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fnn46\" (UID: \"562ce29b-990d-4805-9a26-26dbd95185ed\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.919288 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/562ce29b-990d-4805-9a26-26dbd95185ed-ssh-key-openstack-edpm-ipam\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fnn46\" (UID: \"562ce29b-990d-4805-9a26-26dbd95185ed\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:14 crc kubenswrapper[4767]: I0128 18:56:14.929133 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rtp7t\" (UniqueName: \"kubernetes.io/projected/562ce29b-990d-4805-9a26-26dbd95185ed-kube-api-access-rtp7t\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-fnn46\" (UID: \"562ce29b-990d-4805-9a26-26dbd95185ed\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:15 crc kubenswrapper[4767]: I0128 18:56:15.059832 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:15 crc kubenswrapper[4767]: I0128 18:56:15.456324 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:56:15 crc kubenswrapper[4767]: I0128 18:56:15.456843 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:56:15 crc kubenswrapper[4767]: I0128 18:56:15.659985 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-7rt8q" podUID="9fe44196-e6de-4821-ac4c-f1f9999438bc" containerName="registry-server" containerID="cri-o://05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae" gracePeriod=2 Jan 28 18:56:15 crc kubenswrapper[4767]: I0128 18:56:15.820076 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46"] Jan 28 18:56:15 crc kubenswrapper[4767]: W0128 18:56:15.834017 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod562ce29b_990d_4805_9a26_26dbd95185ed.slice/crio-3b260fcb33f00f8e0cc20f085c48a9a79ca46a6e8f71f12529cd849dfd3c3119 WatchSource:0}: Error finding container 3b260fcb33f00f8e0cc20f085c48a9a79ca46a6e8f71f12529cd849dfd3c3119: Status 404 returned error can't find the container with id 3b260fcb33f00f8e0cc20f085c48a9a79ca46a6e8f71f12529cd849dfd3c3119 Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 
18:56:16.276704 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.431524 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fe44196-e6de-4821-ac4c-f1f9999438bc-utilities\") pod \"9fe44196-e6de-4821-ac4c-f1f9999438bc\" (UID: \"9fe44196-e6de-4821-ac4c-f1f9999438bc\") " Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.431961 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lg8c5\" (UniqueName: \"kubernetes.io/projected/9fe44196-e6de-4821-ac4c-f1f9999438bc-kube-api-access-lg8c5\") pod \"9fe44196-e6de-4821-ac4c-f1f9999438bc\" (UID: \"9fe44196-e6de-4821-ac4c-f1f9999438bc\") " Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.432177 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fe44196-e6de-4821-ac4c-f1f9999438bc-catalog-content\") pod \"9fe44196-e6de-4821-ac4c-f1f9999438bc\" (UID: \"9fe44196-e6de-4821-ac4c-f1f9999438bc\") " Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.432790 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9fe44196-e6de-4821-ac4c-f1f9999438bc-utilities" (OuterVolumeSpecName: "utilities") pod "9fe44196-e6de-4821-ac4c-f1f9999438bc" (UID: "9fe44196-e6de-4821-ac4c-f1f9999438bc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.433002 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9fe44196-e6de-4821-ac4c-f1f9999438bc-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.442825 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9fe44196-e6de-4821-ac4c-f1f9999438bc-kube-api-access-lg8c5" (OuterVolumeSpecName: "kube-api-access-lg8c5") pod "9fe44196-e6de-4821-ac4c-f1f9999438bc" (UID: "9fe44196-e6de-4821-ac4c-f1f9999438bc"). InnerVolumeSpecName "kube-api-access-lg8c5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.501400 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9fe44196-e6de-4821-ac4c-f1f9999438bc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9fe44196-e6de-4821-ac4c-f1f9999438bc" (UID: "9fe44196-e6de-4821-ac4c-f1f9999438bc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.535461 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9fe44196-e6de-4821-ac4c-f1f9999438bc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.535534 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lg8c5\" (UniqueName: \"kubernetes.io/projected/9fe44196-e6de-4821-ac4c-f1f9999438bc-kube-api-access-lg8c5\") on node \"crc\" DevicePath \"\"" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.676998 4767 generic.go:334] "Generic (PLEG): container finished" podID="9fe44196-e6de-4821-ac4c-f1f9999438bc" containerID="05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae" exitCode=0 Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.677112 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rt8q" event={"ID":"9fe44196-e6de-4821-ac4c-f1f9999438bc","Type":"ContainerDied","Data":"05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae"} Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.677132 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-7rt8q" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.677159 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-7rt8q" event={"ID":"9fe44196-e6de-4821-ac4c-f1f9999438bc","Type":"ContainerDied","Data":"6ddf7968159d33cbe9f323ca15bcbcf91423b75d0ca55df5d436d6320d971013"} Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.677185 4767 scope.go:117] "RemoveContainer" containerID="05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.681335 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" event={"ID":"562ce29b-990d-4805-9a26-26dbd95185ed","Type":"ContainerStarted","Data":"e42d6118f10b664c1191f23b2aa334c48d884f60a9280c83a216e665908672d6"} Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.681390 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" event={"ID":"562ce29b-990d-4805-9a26-26dbd95185ed","Type":"ContainerStarted","Data":"3b260fcb33f00f8e0cc20f085c48a9a79ca46a6e8f71f12529cd849dfd3c3119"} Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.722476 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" podStartSLOduration=2.284703997 podStartE2EDuration="2.722445181s" podCreationTimestamp="2026-01-28 18:56:14 +0000 UTC" firstStartedPulling="2026-01-28 18:56:15.848614426 +0000 UTC m=+1581.812797300" lastFinishedPulling="2026-01-28 18:56:16.28635561 +0000 UTC m=+1582.250538484" observedRunningTime="2026-01-28 18:56:16.711648781 +0000 UTC m=+1582.675831665" watchObservedRunningTime="2026-01-28 18:56:16.722445181 +0000 UTC m=+1582.686628055" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.739722 4767 scope.go:117] "RemoveContainer" containerID="9b32d70e8010921db59af46bd0c8084dbddb03c5ef29b7f40f7867d0ccc221c2" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.752842 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-7rt8q"] Jan 
28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.765537 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-7rt8q"] Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.791697 4767 scope.go:117] "RemoveContainer" containerID="e342ca528c0b70d277c7b11e8c983fc205a88956fd43511d2b34fe712af317a3" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.829891 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9fe44196-e6de-4821-ac4c-f1f9999438bc" path="/var/lib/kubelet/pods/9fe44196-e6de-4821-ac4c-f1f9999438bc/volumes" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.831490 4767 scope.go:117] "RemoveContainer" containerID="05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae" Jan 28 18:56:16 crc kubenswrapper[4767]: E0128 18:56:16.832378 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae\": container with ID starting with 05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae not found: ID does not exist" containerID="05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.832484 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae"} err="failed to get container status \"05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae\": rpc error: code = NotFound desc = could not find container \"05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae\": container with ID starting with 05e0dc21c353c5a99482b476d2156f21e30b28b5a987d38bed7d3c0d50c25cae not found: ID does not exist" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.832601 4767 scope.go:117] "RemoveContainer" containerID="9b32d70e8010921db59af46bd0c8084dbddb03c5ef29b7f40f7867d0ccc221c2" Jan 28 18:56:16 crc kubenswrapper[4767]: E0128 18:56:16.833936 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b32d70e8010921db59af46bd0c8084dbddb03c5ef29b7f40f7867d0ccc221c2\": container with ID starting with 9b32d70e8010921db59af46bd0c8084dbddb03c5ef29b7f40f7867d0ccc221c2 not found: ID does not exist" containerID="9b32d70e8010921db59af46bd0c8084dbddb03c5ef29b7f40f7867d0ccc221c2" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.833986 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b32d70e8010921db59af46bd0c8084dbddb03c5ef29b7f40f7867d0ccc221c2"} err="failed to get container status \"9b32d70e8010921db59af46bd0c8084dbddb03c5ef29b7f40f7867d0ccc221c2\": rpc error: code = NotFound desc = could not find container \"9b32d70e8010921db59af46bd0c8084dbddb03c5ef29b7f40f7867d0ccc221c2\": container with ID starting with 9b32d70e8010921db59af46bd0c8084dbddb03c5ef29b7f40f7867d0ccc221c2 not found: ID does not exist" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.834005 4767 scope.go:117] "RemoveContainer" containerID="e342ca528c0b70d277c7b11e8c983fc205a88956fd43511d2b34fe712af317a3" Jan 28 18:56:16 crc kubenswrapper[4767]: E0128 18:56:16.834279 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e342ca528c0b70d277c7b11e8c983fc205a88956fd43511d2b34fe712af317a3\": container with ID starting with 
e342ca528c0b70d277c7b11e8c983fc205a88956fd43511d2b34fe712af317a3 not found: ID does not exist" containerID="e342ca528c0b70d277c7b11e8c983fc205a88956fd43511d2b34fe712af317a3" Jan 28 18:56:16 crc kubenswrapper[4767]: I0128 18:56:16.834295 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e342ca528c0b70d277c7b11e8c983fc205a88956fd43511d2b34fe712af317a3"} err="failed to get container status \"e342ca528c0b70d277c7b11e8c983fc205a88956fd43511d2b34fe712af317a3\": rpc error: code = NotFound desc = could not find container \"e342ca528c0b70d277c7b11e8c983fc205a88956fd43511d2b34fe712af317a3\": container with ID starting with e342ca528c0b70d277c7b11e8c983fc205a88956fd43511d2b34fe712af317a3 not found: ID does not exist" Jan 28 18:56:20 crc kubenswrapper[4767]: I0128 18:56:20.810897 4767 generic.go:334] "Generic (PLEG): container finished" podID="562ce29b-990d-4805-9a26-26dbd95185ed" containerID="e42d6118f10b664c1191f23b2aa334c48d884f60a9280c83a216e665908672d6" exitCode=0 Jan 28 18:56:20 crc kubenswrapper[4767]: I0128 18:56:20.811006 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" event={"ID":"562ce29b-990d-4805-9a26-26dbd95185ed","Type":"ContainerDied","Data":"e42d6118f10b664c1191f23b2aa334c48d884f60a9280c83a216e665908672d6"} Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.332464 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.450329 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rtp7t\" (UniqueName: \"kubernetes.io/projected/562ce29b-990d-4805-9a26-26dbd95185ed-kube-api-access-rtp7t\") pod \"562ce29b-990d-4805-9a26-26dbd95185ed\" (UID: \"562ce29b-990d-4805-9a26-26dbd95185ed\") " Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.450806 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/562ce29b-990d-4805-9a26-26dbd95185ed-ssh-key-openstack-edpm-ipam\") pod \"562ce29b-990d-4805-9a26-26dbd95185ed\" (UID: \"562ce29b-990d-4805-9a26-26dbd95185ed\") " Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.451266 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/562ce29b-990d-4805-9a26-26dbd95185ed-inventory\") pod \"562ce29b-990d-4805-9a26-26dbd95185ed\" (UID: \"562ce29b-990d-4805-9a26-26dbd95185ed\") " Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.457978 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/562ce29b-990d-4805-9a26-26dbd95185ed-kube-api-access-rtp7t" (OuterVolumeSpecName: "kube-api-access-rtp7t") pod "562ce29b-990d-4805-9a26-26dbd95185ed" (UID: "562ce29b-990d-4805-9a26-26dbd95185ed"). InnerVolumeSpecName "kube-api-access-rtp7t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.484840 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/562ce29b-990d-4805-9a26-26dbd95185ed-inventory" (OuterVolumeSpecName: "inventory") pod "562ce29b-990d-4805-9a26-26dbd95185ed" (UID: "562ce29b-990d-4805-9a26-26dbd95185ed"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.489269 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/562ce29b-990d-4805-9a26-26dbd95185ed-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "562ce29b-990d-4805-9a26-26dbd95185ed" (UID: "562ce29b-990d-4805-9a26-26dbd95185ed"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.552847 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rtp7t\" (UniqueName: \"kubernetes.io/projected/562ce29b-990d-4805-9a26-26dbd95185ed-kube-api-access-rtp7t\") on node \"crc\" DevicePath \"\"" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.552887 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/562ce29b-990d-4805-9a26-26dbd95185ed-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.552900 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/562ce29b-990d-4805-9a26-26dbd95185ed-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.837705 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" event={"ID":"562ce29b-990d-4805-9a26-26dbd95185ed","Type":"ContainerDied","Data":"3b260fcb33f00f8e0cc20f085c48a9a79ca46a6e8f71f12529cd849dfd3c3119"} Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.837754 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3b260fcb33f00f8e0cc20f085c48a9a79ca46a6e8f71f12529cd849dfd3c3119" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.837784 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-fnn46" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.915616 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph"] Jan 28 18:56:22 crc kubenswrapper[4767]: E0128 18:56:22.916269 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fe44196-e6de-4821-ac4c-f1f9999438bc" containerName="extract-content" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.916296 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fe44196-e6de-4821-ac4c-f1f9999438bc" containerName="extract-content" Jan 28 18:56:22 crc kubenswrapper[4767]: E0128 18:56:22.916320 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fe44196-e6de-4821-ac4c-f1f9999438bc" containerName="extract-utilities" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.916330 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fe44196-e6de-4821-ac4c-f1f9999438bc" containerName="extract-utilities" Jan 28 18:56:22 crc kubenswrapper[4767]: E0128 18:56:22.916342 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9fe44196-e6de-4821-ac4c-f1f9999438bc" containerName="registry-server" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.916354 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="9fe44196-e6de-4821-ac4c-f1f9999438bc" containerName="registry-server" Jan 28 18:56:22 crc kubenswrapper[4767]: E0128 18:56:22.916381 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="562ce29b-990d-4805-9a26-26dbd95185ed" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.916392 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="562ce29b-990d-4805-9a26-26dbd95185ed" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.916648 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="562ce29b-990d-4805-9a26-26dbd95185ed" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.916670 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="9fe44196-e6de-4821-ac4c-f1f9999438bc" containerName="registry-server" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.917667 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.921166 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.921456 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.921853 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.921993 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.931239 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph"] Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.964893 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.965027 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.965077 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wp6s2\" (UniqueName: \"kubernetes.io/projected/7859a3a8-d6ca-41b4-98f6-9561f839948a-kube-api-access-wp6s2\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:22 crc kubenswrapper[4767]: I0128 18:56:22.966803 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:23 crc kubenswrapper[4767]: I0128 18:56:23.067870 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:23 crc kubenswrapper[4767]: I0128 18:56:23.068476 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wp6s2\" (UniqueName: 
\"kubernetes.io/projected/7859a3a8-d6ca-41b4-98f6-9561f839948a-kube-api-access-wp6s2\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:23 crc kubenswrapper[4767]: I0128 18:56:23.068580 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:23 crc kubenswrapper[4767]: I0128 18:56:23.068667 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:23 crc kubenswrapper[4767]: I0128 18:56:23.072848 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:23 crc kubenswrapper[4767]: I0128 18:56:23.072971 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:23 crc kubenswrapper[4767]: I0128 18:56:23.074712 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-ssh-key-openstack-edpm-ipam\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:23 crc kubenswrapper[4767]: I0128 18:56:23.088865 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wp6s2\" (UniqueName: \"kubernetes.io/projected/7859a3a8-d6ca-41b4-98f6-9561f839948a-kube-api-access-wp6s2\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:23 crc kubenswrapper[4767]: I0128 18:56:23.239655 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:56:23 crc kubenswrapper[4767]: I0128 18:56:23.821011 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph"] Jan 28 18:56:23 crc kubenswrapper[4767]: I0128 18:56:23.853611 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" event={"ID":"7859a3a8-d6ca-41b4-98f6-9561f839948a","Type":"ContainerStarted","Data":"6c9cd7eef811858ed30468b0cbc6015d5d115b01c1e836c300c147b7ab3c2ce4"} Jan 28 18:56:24 crc kubenswrapper[4767]: I0128 18:56:24.874547 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" event={"ID":"7859a3a8-d6ca-41b4-98f6-9561f839948a","Type":"ContainerStarted","Data":"fdc22e8431a4e72f2b1795cef136ad6cc061ea77fb82cf9761b4a94f01144129"} Jan 28 18:56:24 crc kubenswrapper[4767]: I0128 18:56:24.904579 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" podStartSLOduration=2.432695943 podStartE2EDuration="2.904538752s" podCreationTimestamp="2026-01-28 18:56:22 +0000 UTC" firstStartedPulling="2026-01-28 18:56:23.818357041 +0000 UTC m=+1589.782539915" lastFinishedPulling="2026-01-28 18:56:24.29019985 +0000 UTC m=+1590.254382724" observedRunningTime="2026-01-28 18:56:24.891875912 +0000 UTC m=+1590.856058786" watchObservedRunningTime="2026-01-28 18:56:24.904538752 +0000 UTC m=+1590.868721626" Jan 28 18:56:45 crc kubenswrapper[4767]: I0128 18:56:45.455829 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 18:56:45 crc kubenswrapper[4767]: I0128 18:56:45.456806 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 18:56:45 crc kubenswrapper[4767]: I0128 18:56:45.456890 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 18:56:45 crc kubenswrapper[4767]: I0128 18:56:45.458180 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 18:56:45 crc kubenswrapper[4767]: I0128 18:56:45.458281 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" gracePeriod=600 Jan 28 18:56:45 crc kubenswrapper[4767]: E0128 18:56:45.582588 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:56:46 crc kubenswrapper[4767]: I0128 18:56:46.095526 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" exitCode=0 Jan 28 18:56:46 crc kubenswrapper[4767]: I0128 18:56:46.095585 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152"} Jan 28 18:56:46 crc kubenswrapper[4767]: I0128 18:56:46.095630 4767 scope.go:117] "RemoveContainer" containerID="6c4deef50f94ebc84f432ab68abee6b83fa4675bb3fde9668560bfed495791e5" Jan 28 18:56:46 crc kubenswrapper[4767]: I0128 18:56:46.096568 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:56:46 crc kubenswrapper[4767]: E0128 18:56:46.097320 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:56:49 crc kubenswrapper[4767]: I0128 18:56:49.981827 4767 scope.go:117] "RemoveContainer" containerID="759efdc292c13b8f3443ede99edb215a1788a2aa61e0956e4bb9246f96f555b8" Jan 28 18:56:59 crc kubenswrapper[4767]: I0128 18:56:59.795926 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:56:59 crc kubenswrapper[4767]: E0128 18:56:59.797099 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:57:10 crc kubenswrapper[4767]: I0128 18:57:10.796725 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:57:10 crc kubenswrapper[4767]: E0128 18:57:10.797819 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:57:24 crc kubenswrapper[4767]: I0128 18:57:24.804675 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:57:24 crc kubenswrapper[4767]: E0128 18:57:24.805890 4767 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:57:35 crc kubenswrapper[4767]: I0128 18:57:35.796530 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:57:35 crc kubenswrapper[4767]: E0128 18:57:35.797722 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:57:47 crc kubenswrapper[4767]: I0128 18:57:47.796989 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:57:47 crc kubenswrapper[4767]: E0128 18:57:47.798694 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:57:50 crc kubenswrapper[4767]: I0128 18:57:50.090455 4767 scope.go:117] "RemoveContainer" containerID="cf22ce5e7df7fc3eaadf896cedc0c1799cfff94aed2f7c3bda1986534359f511" Jan 28 18:57:50 crc kubenswrapper[4767]: I0128 18:57:50.120553 4767 scope.go:117] "RemoveContainer" containerID="50e5b7616f3f00ab5c90b6b604f9b08b8af456577f56689e6606244b7e8f5f0b" Jan 28 18:57:58 crc kubenswrapper[4767]: I0128 18:57:58.797043 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:57:58 crc kubenswrapper[4767]: E0128 18:57:58.798276 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:58:09 crc kubenswrapper[4767]: I0128 18:58:09.796356 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:58:09 crc kubenswrapper[4767]: E0128 18:58:09.797414 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:58:22 crc kubenswrapper[4767]: I0128 18:58:22.797549 4767 scope.go:117] 
"RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:58:22 crc kubenswrapper[4767]: E0128 18:58:22.798694 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:58:37 crc kubenswrapper[4767]: I0128 18:58:37.796520 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:58:37 crc kubenswrapper[4767]: E0128 18:58:37.797447 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:58:51 crc kubenswrapper[4767]: I0128 18:58:51.796890 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:58:51 crc kubenswrapper[4767]: E0128 18:58:51.798406 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:58:55 crc kubenswrapper[4767]: I0128 18:58:55.054874 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-vfqm5"] Jan 28 18:58:55 crc kubenswrapper[4767]: I0128 18:58:55.070094 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-f0b5-account-create-update-lgdx8"] Jan 28 18:58:55 crc kubenswrapper[4767]: I0128 18:58:55.080593 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-f0b5-account-create-update-lgdx8"] Jan 28 18:58:55 crc kubenswrapper[4767]: I0128 18:58:55.091431 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-vfqm5"] Jan 28 18:58:56 crc kubenswrapper[4767]: I0128 18:58:56.034948 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-af6e-account-create-update-6t8hx"] Jan 28 18:58:56 crc kubenswrapper[4767]: I0128 18:58:56.044185 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-af6e-account-create-update-6t8hx"] Jan 28 18:58:56 crc kubenswrapper[4767]: I0128 18:58:56.809156 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4" path="/var/lib/kubelet/pods/29c6d6d5-bad8-40df-bd2b-90fb7c37bcd4/volumes" Jan 28 18:58:56 crc kubenswrapper[4767]: I0128 18:58:56.810614 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15" path="/var/lib/kubelet/pods/6fef38ea-0f81-4ed7-b7f4-9d26d95c2e15/volumes" Jan 28 18:58:56 crc kubenswrapper[4767]: I0128 18:58:56.811618 4767 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7f6bed0-23bc-4917-a2d1-2e6439ef6205" path="/var/lib/kubelet/pods/d7f6bed0-23bc-4917-a2d1-2e6439ef6205/volumes" Jan 28 18:59:01 crc kubenswrapper[4767]: I0128 18:59:01.055940 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-dhkzb"] Jan 28 18:59:01 crc kubenswrapper[4767]: I0128 18:59:01.070294 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-dhkzb"] Jan 28 18:59:02 crc kubenswrapper[4767]: I0128 18:59:02.796291 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:59:02 crc kubenswrapper[4767]: E0128 18:59:02.796599 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:59:02 crc kubenswrapper[4767]: I0128 18:59:02.808156 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fe833b02-be1c-45d7-88b0-8c18d9471631" path="/var/lib/kubelet/pods/fe833b02-be1c-45d7-88b0-8c18d9471631/volumes" Jan 28 18:59:04 crc kubenswrapper[4767]: I0128 18:59:04.041359 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-44ggf"] Jan 28 18:59:04 crc kubenswrapper[4767]: I0128 18:59:04.056021 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-44ggf"] Jan 28 18:59:04 crc kubenswrapper[4767]: I0128 18:59:04.806600 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ef954606-bfec-4472-8fff-04d3fa5e3bf0" path="/var/lib/kubelet/pods/ef954606-bfec-4472-8fff-04d3fa5e3bf0/volumes" Jan 28 18:59:07 crc kubenswrapper[4767]: I0128 18:59:07.038926 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-1668-account-create-update-7rp66"] Jan 28 18:59:07 crc kubenswrapper[4767]: I0128 18:59:07.054562 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-2rnmq"] Jan 28 18:59:07 crc kubenswrapper[4767]: I0128 18:59:07.065688 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-2rnmq"] Jan 28 18:59:07 crc kubenswrapper[4767]: I0128 18:59:07.079031 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-1668-account-create-update-7rp66"] Jan 28 18:59:08 crc kubenswrapper[4767]: I0128 18:59:08.809407 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ce20579-faaa-46f4-8d21-ae1fa142da52" path="/var/lib/kubelet/pods/8ce20579-faaa-46f4-8d21-ae1fa142da52/volumes" Jan 28 18:59:08 crc kubenswrapper[4767]: I0128 18:59:08.810522 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c29bdd1f-3c65-4fbc-9365-b7f446afc233" path="/var/lib/kubelet/pods/c29bdd1f-3c65-4fbc-9365-b7f446afc233/volumes" Jan 28 18:59:13 crc kubenswrapper[4767]: I0128 18:59:13.795710 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:59:13 crc kubenswrapper[4767]: E0128 18:59:13.796613 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:59:25 crc kubenswrapper[4767]: I0128 18:59:25.796043 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:59:25 crc kubenswrapper[4767]: E0128 18:59:25.797458 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:59:27 crc kubenswrapper[4767]: I0128 18:59:27.915632 4767 generic.go:334] "Generic (PLEG): container finished" podID="7859a3a8-d6ca-41b4-98f6-9561f839948a" containerID="fdc22e8431a4e72f2b1795cef136ad6cc061ea77fb82cf9761b4a94f01144129" exitCode=0 Jan 28 18:59:27 crc kubenswrapper[4767]: I0128 18:59:27.917057 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" event={"ID":"7859a3a8-d6ca-41b4-98f6-9561f839948a","Type":"ContainerDied","Data":"fdc22e8431a4e72f2b1795cef136ad6cc061ea77fb82cf9761b4a94f01144129"} Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.416098 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.575639 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wp6s2\" (UniqueName: \"kubernetes.io/projected/7859a3a8-d6ca-41b4-98f6-9561f839948a-kube-api-access-wp6s2\") pod \"7859a3a8-d6ca-41b4-98f6-9561f839948a\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.575935 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-ssh-key-openstack-edpm-ipam\") pod \"7859a3a8-d6ca-41b4-98f6-9561f839948a\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.576005 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-inventory\") pod \"7859a3a8-d6ca-41b4-98f6-9561f839948a\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.576320 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-bootstrap-combined-ca-bundle\") pod \"7859a3a8-d6ca-41b4-98f6-9561f839948a\" (UID: \"7859a3a8-d6ca-41b4-98f6-9561f839948a\") " Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.584150 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod 
"7859a3a8-d6ca-41b4-98f6-9561f839948a" (UID: "7859a3a8-d6ca-41b4-98f6-9561f839948a"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.584613 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7859a3a8-d6ca-41b4-98f6-9561f839948a-kube-api-access-wp6s2" (OuterVolumeSpecName: "kube-api-access-wp6s2") pod "7859a3a8-d6ca-41b4-98f6-9561f839948a" (UID: "7859a3a8-d6ca-41b4-98f6-9561f839948a"). InnerVolumeSpecName "kube-api-access-wp6s2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.623261 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "7859a3a8-d6ca-41b4-98f6-9561f839948a" (UID: "7859a3a8-d6ca-41b4-98f6-9561f839948a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.646304 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-inventory" (OuterVolumeSpecName: "inventory") pod "7859a3a8-d6ca-41b4-98f6-9561f839948a" (UID: "7859a3a8-d6ca-41b4-98f6-9561f839948a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.678926 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wp6s2\" (UniqueName: \"kubernetes.io/projected/7859a3a8-d6ca-41b4-98f6-9561f839948a-kube-api-access-wp6s2\") on node \"crc\" DevicePath \"\"" Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.678971 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.678988 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.678999 4767 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7859a3a8-d6ca-41b4-98f6-9561f839948a-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.942950 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" event={"ID":"7859a3a8-d6ca-41b4-98f6-9561f839948a","Type":"ContainerDied","Data":"6c9cd7eef811858ed30468b0cbc6015d5d115b01c1e836c300c147b7ab3c2ce4"} Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.943624 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c9cd7eef811858ed30468b0cbc6015d5d115b01c1e836c300c147b7ab3c2ce4" Jan 28 18:59:29 crc kubenswrapper[4767]: I0128 18:59:29.943453 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.041245 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6"] Jan 28 18:59:30 crc kubenswrapper[4767]: E0128 18:59:30.041717 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7859a3a8-d6ca-41b4-98f6-9561f839948a" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.041737 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="7859a3a8-d6ca-41b4-98f6-9561f839948a" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.041920 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="7859a3a8-d6ca-41b4-98f6-9561f839948a" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.042665 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.046102 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.046345 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.046743 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.048649 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.072040 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6"] Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.189443 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ebe359f-1b9c-4278-9c6c-5c72cf619080-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6\" (UID: \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.190017 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1ebe359f-1b9c-4278-9c6c-5c72cf619080-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6\" (UID: \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.190152 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnprx\" (UniqueName: \"kubernetes.io/projected/1ebe359f-1b9c-4278-9c6c-5c72cf619080-kube-api-access-wnprx\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6\" (UID: \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 18:59:30 crc kubenswrapper[4767]: 
I0128 18:59:30.292638 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ebe359f-1b9c-4278-9c6c-5c72cf619080-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6\" (UID: \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.292769 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1ebe359f-1b9c-4278-9c6c-5c72cf619080-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6\" (UID: \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.292813 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnprx\" (UniqueName: \"kubernetes.io/projected/1ebe359f-1b9c-4278-9c6c-5c72cf619080-kube-api-access-wnprx\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6\" (UID: \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.298658 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1ebe359f-1b9c-4278-9c6c-5c72cf619080-ssh-key-openstack-edpm-ipam\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6\" (UID: \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.299150 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ebe359f-1b9c-4278-9c6c-5c72cf619080-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6\" (UID: \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.315329 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnprx\" (UniqueName: \"kubernetes.io/projected/1ebe359f-1b9c-4278-9c6c-5c72cf619080-kube-api-access-wnprx\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6\" (UID: \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.369078 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 18:59:30 crc kubenswrapper[4767]: I0128 18:59:30.998320 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 18:59:31 crc kubenswrapper[4767]: I0128 18:59:31.008811 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6"] Jan 28 18:59:31 crc kubenswrapper[4767]: I0128 18:59:31.967105 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" event={"ID":"1ebe359f-1b9c-4278-9c6c-5c72cf619080","Type":"ContainerStarted","Data":"3bc38e7d047508385921ee832664905cab7cf41f5c16b0738310db33a140bb65"} Jan 28 18:59:31 crc kubenswrapper[4767]: I0128 18:59:31.992603 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" podStartSLOduration=1.274986585 podStartE2EDuration="1.992581599s" podCreationTimestamp="2026-01-28 18:59:30 +0000 UTC" firstStartedPulling="2026-01-28 18:59:30.997997375 +0000 UTC m=+1776.962180249" lastFinishedPulling="2026-01-28 18:59:31.715592389 +0000 UTC m=+1777.679775263" observedRunningTime="2026-01-28 18:59:31.982845464 +0000 UTC m=+1777.947028348" watchObservedRunningTime="2026-01-28 18:59:31.992581599 +0000 UTC m=+1777.956764473" Jan 28 18:59:32 crc kubenswrapper[4767]: I0128 18:59:32.979032 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" event={"ID":"1ebe359f-1b9c-4278-9c6c-5c72cf619080","Type":"ContainerStarted","Data":"8bdf59f4d73566049b327418e4c6ea99f56d4ac2c570d82fee13283669156ebd"} Jan 28 18:59:34 crc kubenswrapper[4767]: I0128 18:59:34.047593 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-6dl26"] Jan 28 18:59:34 crc kubenswrapper[4767]: I0128 18:59:34.058890 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-6dl26"] Jan 28 18:59:34 crc kubenswrapper[4767]: I0128 18:59:34.808329 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ebb33a9-654c-4edc-b487-7b2a08c424c6" path="/var/lib/kubelet/pods/8ebb33a9-654c-4edc-b487-7b2a08c424c6/volumes" Jan 28 18:59:36 crc kubenswrapper[4767]: I0128 18:59:36.033026 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-9qfw8"] Jan 28 18:59:36 crc kubenswrapper[4767]: I0128 18:59:36.043690 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-9qfw8"] Jan 28 18:59:36 crc kubenswrapper[4767]: I0128 18:59:36.807785 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4fd6701-6878-4f65-b3aa-fd437ec22ba2" path="/var/lib/kubelet/pods/a4fd6701-6878-4f65-b3aa-fd437ec22ba2/volumes" Jan 28 18:59:37 crc kubenswrapper[4767]: I0128 18:59:37.796432 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:59:37 crc kubenswrapper[4767]: E0128 18:59:37.797311 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:59:41 crc kubenswrapper[4767]: I0128 18:59:41.057975 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-b47a-account-create-update-ngbck"] Jan 28 18:59:41 crc kubenswrapper[4767]: I0128 18:59:41.081062 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-create-m5cgt"] Jan 28 18:59:41 crc kubenswrapper[4767]: I0128 18:59:41.109288 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-c777-account-create-update-rg925"] Jan 28 18:59:41 crc kubenswrapper[4767]: I0128 18:59:41.131401 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-885qk"] Jan 28 18:59:41 crc kubenswrapper[4767]: I0128 18:59:41.167389 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-b47a-account-create-update-ngbck"] Jan 28 18:59:41 crc kubenswrapper[4767]: I0128 18:59:41.213609 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-885qk"] Jan 28 18:59:41 crc kubenswrapper[4767]: I0128 18:59:41.243295 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-create-m5cgt"] Jan 28 18:59:41 crc kubenswrapper[4767]: I0128 18:59:41.264680 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-c777-account-create-update-rg925"] Jan 28 18:59:41 crc kubenswrapper[4767]: I0128 18:59:41.285167 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-39de-account-create-update-cc7p9"] Jan 28 18:59:41 crc kubenswrapper[4767]: I0128 18:59:41.302464 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-5774-account-create-update-8ptkt"] Jan 28 18:59:41 crc kubenswrapper[4767]: I0128 18:59:41.318924 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-5774-account-create-update-8ptkt"] Jan 28 18:59:41 crc kubenswrapper[4767]: I0128 18:59:41.334065 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-39de-account-create-update-cc7p9"] Jan 28 18:59:42 crc kubenswrapper[4767]: I0128 18:59:42.807380 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c38d151-db9b-43d4-ab60-44b7f3169fd2" path="/var/lib/kubelet/pods/3c38d151-db9b-43d4-ab60-44b7f3169fd2/volumes" Jan 28 18:59:42 crc kubenswrapper[4767]: I0128 18:59:42.808717 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5173a106-3a63-4c67-8b28-d30ccf1fea1c" path="/var/lib/kubelet/pods/5173a106-3a63-4c67-8b28-d30ccf1fea1c/volumes" Jan 28 18:59:42 crc kubenswrapper[4767]: I0128 18:59:42.809458 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8a81f2b7-8862-4a59-bf90-23a8310a9b19" path="/var/lib/kubelet/pods/8a81f2b7-8862-4a59-bf90-23a8310a9b19/volumes" Jan 28 18:59:42 crc kubenswrapper[4767]: I0128 18:59:42.810174 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="927eabd5-6408-478b-8740-3897f0efcb05" path="/var/lib/kubelet/pods/927eabd5-6408-478b-8740-3897f0efcb05/volumes" Jan 28 18:59:42 crc kubenswrapper[4767]: I0128 18:59:42.811809 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb33b8a5-83e7-4431-b6fa-489b022f600e" path="/var/lib/kubelet/pods/eb33b8a5-83e7-4431-b6fa-489b022f600e/volumes" Jan 28 18:59:42 crc kubenswrapper[4767]: I0128 18:59:42.812633 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="fced9a49-f6fd-4f6f-a71d-b7951315008f" path="/var/lib/kubelet/pods/fced9a49-f6fd-4f6f-a71d-b7951315008f/volumes" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.049267 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-q4h5g"] Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.064838 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-q4h5g"] Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.227933 4767 scope.go:117] "RemoveContainer" containerID="3411901c2af49c12222933bdd5d4f82c1039997c6aecd44390366c79c983da89" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.265922 4767 scope.go:117] "RemoveContainer" containerID="4bdf27cd4b3bf526be591961377cd271a0d79b960e1c8c515177ad61fd282029" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.329597 4767 scope.go:117] "RemoveContainer" containerID="c873fb105016dbbff060df0427f33fe28c908d5832146799efff79a9a1ac534c" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.358148 4767 scope.go:117] "RemoveContainer" containerID="5cc39b115922ca7184e378a6ee63563e811ca896a6cf442e71bc32d5dacf16d7" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.428424 4767 scope.go:117] "RemoveContainer" containerID="f663a8faba36af3ac56d6aa0cb4d23e7911f0260f623f384e9d59a54b4f7de22" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.519234 4767 scope.go:117] "RemoveContainer" containerID="cce7ef92922cb7e00c3f9bafec2029d583353b8ad4914634ffaa1060b755870a" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.593080 4767 scope.go:117] "RemoveContainer" containerID="0db59ac617bfc346f657797ad47c6cde7a7226af5e20893b38842784fa29c9da" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.657675 4767 scope.go:117] "RemoveContainer" containerID="faaabe6fdad6ea7947adb493a2dba4d21a6361a315a5cef9a9eb3d3dcaf75f0c" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.682752 4767 scope.go:117] "RemoveContainer" containerID="18466654814e5319f7e1f0c3d7eb680c837d446a0289f97230ad5195aeabe787" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.705357 4767 scope.go:117] "RemoveContainer" containerID="1314d240d923447f7d45dea9cc06c7db6dd1892b6475cfc81ebaa81b2f830f78" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.726190 4767 scope.go:117] "RemoveContainer" containerID="b68b5de623d0808960949f320fa16bab631ec0ed912b4cb25786d725656546b4" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.754804 4767 scope.go:117] "RemoveContainer" containerID="24a4e4a7af49a58a09c180060abf310a5c5b4fd2c88e758b86e82d4128987380" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.778832 4767 scope.go:117] "RemoveContainer" containerID="2156592dafc87c9c01fb56a217996f56bdd0917a97377e6ad61bbc59b08f1669" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.796428 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 18:59:50 crc kubenswrapper[4767]: E0128 18:59:50.796902 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.817377 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="2df7ec48-f703-40db-9f31-f4f216de7935" path="/var/lib/kubelet/pods/2df7ec48-f703-40db-9f31-f4f216de7935/volumes" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.827729 4767 scope.go:117] "RemoveContainer" containerID="ad3c363464601bbaf2037b7cd24bb10690988168b7388dc4d7d9d0b205f29eea" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.856307 4767 scope.go:117] "RemoveContainer" containerID="20c8be4ec78c94967c72b6c310a4ee508ea503939cc24f9830fba93fc0d942ab" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.882831 4767 scope.go:117] "RemoveContainer" containerID="5c147555279081912bdef04baf605e41f62d029008869b2b65d655b149dff921" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.908495 4767 scope.go:117] "RemoveContainer" containerID="583d0abace9a26ea2c78066a5abf0e36aff5c7002d177025bba69504223e9ba7" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.932664 4767 scope.go:117] "RemoveContainer" containerID="67a62af277ea081937b0dfaff2d0c59fe92e22b9640f4b7fcfa05645a22e13d0" Jan 28 18:59:50 crc kubenswrapper[4767]: I0128 18:59:50.952240 4767 scope.go:117] "RemoveContainer" containerID="f03428e8a4148ed782d6bc54448b718bf7bbfa3ca45e3928ae8ef1851939171e" Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.161518 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd"] Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.165089 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.179335 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd"] Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.185871 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.186477 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.306864 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnzv8\" (UniqueName: \"kubernetes.io/projected/5057532f-71be-41b1-8c80-248b78eb8d20-kube-api-access-hnzv8\") pod \"collect-profiles-29493780-qlmdd\" (UID: \"5057532f-71be-41b1-8c80-248b78eb8d20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.307305 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5057532f-71be-41b1-8c80-248b78eb8d20-secret-volume\") pod \"collect-profiles-29493780-qlmdd\" (UID: \"5057532f-71be-41b1-8c80-248b78eb8d20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.307350 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5057532f-71be-41b1-8c80-248b78eb8d20-config-volume\") pod \"collect-profiles-29493780-qlmdd\" (UID: \"5057532f-71be-41b1-8c80-248b78eb8d20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:00 
crc kubenswrapper[4767]: I0128 19:00:00.409255 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnzv8\" (UniqueName: \"kubernetes.io/projected/5057532f-71be-41b1-8c80-248b78eb8d20-kube-api-access-hnzv8\") pod \"collect-profiles-29493780-qlmdd\" (UID: \"5057532f-71be-41b1-8c80-248b78eb8d20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.409414 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5057532f-71be-41b1-8c80-248b78eb8d20-secret-volume\") pod \"collect-profiles-29493780-qlmdd\" (UID: \"5057532f-71be-41b1-8c80-248b78eb8d20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.409466 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5057532f-71be-41b1-8c80-248b78eb8d20-config-volume\") pod \"collect-profiles-29493780-qlmdd\" (UID: \"5057532f-71be-41b1-8c80-248b78eb8d20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.410495 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5057532f-71be-41b1-8c80-248b78eb8d20-config-volume\") pod \"collect-profiles-29493780-qlmdd\" (UID: \"5057532f-71be-41b1-8c80-248b78eb8d20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.417597 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5057532f-71be-41b1-8c80-248b78eb8d20-secret-volume\") pod \"collect-profiles-29493780-qlmdd\" (UID: \"5057532f-71be-41b1-8c80-248b78eb8d20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.429759 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnzv8\" (UniqueName: \"kubernetes.io/projected/5057532f-71be-41b1-8c80-248b78eb8d20-kube-api-access-hnzv8\") pod \"collect-profiles-29493780-qlmdd\" (UID: \"5057532f-71be-41b1-8c80-248b78eb8d20\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:00 crc kubenswrapper[4767]: I0128 19:00:00.528619 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:01 crc kubenswrapper[4767]: I0128 19:00:01.036318 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd"] Jan 28 19:00:01 crc kubenswrapper[4767]: I0128 19:00:01.298044 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" event={"ID":"5057532f-71be-41b1-8c80-248b78eb8d20","Type":"ContainerStarted","Data":"179fda6c939919b807c514737b104ad076da709e228701747f22acd59d26b53e"} Jan 28 19:00:01 crc kubenswrapper[4767]: I0128 19:00:01.298597 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" event={"ID":"5057532f-71be-41b1-8c80-248b78eb8d20","Type":"ContainerStarted","Data":"13aac0bbc5a1b1190cefee0db4a74c71a16f478ff9992768b9ea32e61d7171ba"} Jan 28 19:00:01 crc kubenswrapper[4767]: I0128 19:00:01.324008 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" podStartSLOduration=1.323985766 podStartE2EDuration="1.323985766s" podCreationTimestamp="2026-01-28 19:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 19:00:01.318256286 +0000 UTC m=+1807.282439170" watchObservedRunningTime="2026-01-28 19:00:01.323985766 +0000 UTC m=+1807.288168630" Jan 28 19:00:01 crc kubenswrapper[4767]: I0128 19:00:01.796115 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 19:00:01 crc kubenswrapper[4767]: E0128 19:00:01.796947 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:00:02 crc kubenswrapper[4767]: I0128 19:00:02.309836 4767 generic.go:334] "Generic (PLEG): container finished" podID="5057532f-71be-41b1-8c80-248b78eb8d20" containerID="179fda6c939919b807c514737b104ad076da709e228701747f22acd59d26b53e" exitCode=0 Jan 28 19:00:02 crc kubenswrapper[4767]: I0128 19:00:02.309893 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" event={"ID":"5057532f-71be-41b1-8c80-248b78eb8d20","Type":"ContainerDied","Data":"179fda6c939919b807c514737b104ad076da709e228701747f22acd59d26b53e"} Jan 28 19:00:03 crc kubenswrapper[4767]: I0128 19:00:03.680962 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:03 crc kubenswrapper[4767]: I0128 19:00:03.790145 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5057532f-71be-41b1-8c80-248b78eb8d20-secret-volume\") pod \"5057532f-71be-41b1-8c80-248b78eb8d20\" (UID: \"5057532f-71be-41b1-8c80-248b78eb8d20\") " Jan 28 19:00:03 crc kubenswrapper[4767]: I0128 19:00:03.790282 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5057532f-71be-41b1-8c80-248b78eb8d20-config-volume\") pod \"5057532f-71be-41b1-8c80-248b78eb8d20\" (UID: \"5057532f-71be-41b1-8c80-248b78eb8d20\") " Jan 28 19:00:03 crc kubenswrapper[4767]: I0128 19:00:03.790788 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnzv8\" (UniqueName: \"kubernetes.io/projected/5057532f-71be-41b1-8c80-248b78eb8d20-kube-api-access-hnzv8\") pod \"5057532f-71be-41b1-8c80-248b78eb8d20\" (UID: \"5057532f-71be-41b1-8c80-248b78eb8d20\") " Jan 28 19:00:03 crc kubenswrapper[4767]: I0128 19:00:03.791499 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5057532f-71be-41b1-8c80-248b78eb8d20-config-volume" (OuterVolumeSpecName: "config-volume") pod "5057532f-71be-41b1-8c80-248b78eb8d20" (UID: "5057532f-71be-41b1-8c80-248b78eb8d20"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 19:00:03 crc kubenswrapper[4767]: I0128 19:00:03.792014 4767 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5057532f-71be-41b1-8c80-248b78eb8d20-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 19:00:03 crc kubenswrapper[4767]: I0128 19:00:03.799464 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5057532f-71be-41b1-8c80-248b78eb8d20-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5057532f-71be-41b1-8c80-248b78eb8d20" (UID: "5057532f-71be-41b1-8c80-248b78eb8d20"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:00:03 crc kubenswrapper[4767]: I0128 19:00:03.800771 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5057532f-71be-41b1-8c80-248b78eb8d20-kube-api-access-hnzv8" (OuterVolumeSpecName: "kube-api-access-hnzv8") pod "5057532f-71be-41b1-8c80-248b78eb8d20" (UID: "5057532f-71be-41b1-8c80-248b78eb8d20"). InnerVolumeSpecName "kube-api-access-hnzv8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:00:03 crc kubenswrapper[4767]: I0128 19:00:03.893569 4767 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5057532f-71be-41b1-8c80-248b78eb8d20-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 19:00:03 crc kubenswrapper[4767]: I0128 19:00:03.893619 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnzv8\" (UniqueName: \"kubernetes.io/projected/5057532f-71be-41b1-8c80-248b78eb8d20-kube-api-access-hnzv8\") on node \"crc\" DevicePath \"\"" Jan 28 19:00:04 crc kubenswrapper[4767]: I0128 19:00:04.335506 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" event={"ID":"5057532f-71be-41b1-8c80-248b78eb8d20","Type":"ContainerDied","Data":"13aac0bbc5a1b1190cefee0db4a74c71a16f478ff9992768b9ea32e61d7171ba"} Jan 28 19:00:04 crc kubenswrapper[4767]: I0128 19:00:04.335559 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="13aac0bbc5a1b1190cefee0db4a74c71a16f478ff9992768b9ea32e61d7171ba" Jan 28 19:00:04 crc kubenswrapper[4767]: I0128 19:00:04.335587 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd" Jan 28 19:00:13 crc kubenswrapper[4767]: I0128 19:00:13.796072 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 19:00:13 crc kubenswrapper[4767]: E0128 19:00:13.797017 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:00:19 crc kubenswrapper[4767]: I0128 19:00:19.988285 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-4srtl"] Jan 28 19:00:19 crc kubenswrapper[4767]: E0128 19:00:19.989315 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5057532f-71be-41b1-8c80-248b78eb8d20" containerName="collect-profiles" Jan 28 19:00:19 crc kubenswrapper[4767]: I0128 19:00:19.989329 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="5057532f-71be-41b1-8c80-248b78eb8d20" containerName="collect-profiles" Jan 28 19:00:19 crc kubenswrapper[4767]: I0128 19:00:19.989500 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="5057532f-71be-41b1-8c80-248b78eb8d20" containerName="collect-profiles" Jan 28 19:00:19 crc kubenswrapper[4767]: I0128 19:00:19.990965 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:20 crc kubenswrapper[4767]: I0128 19:00:20.017788 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4srtl"] Jan 28 19:00:20 crc kubenswrapper[4767]: I0128 19:00:20.076674 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2sg46\" (UniqueName: \"kubernetes.io/projected/a94f804a-cb4a-4e4e-beff-aaac7f117710-kube-api-access-2sg46\") pod \"certified-operators-4srtl\" (UID: \"a94f804a-cb4a-4e4e-beff-aaac7f117710\") " pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:20 crc kubenswrapper[4767]: I0128 19:00:20.076786 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a94f804a-cb4a-4e4e-beff-aaac7f117710-catalog-content\") pod \"certified-operators-4srtl\" (UID: \"a94f804a-cb4a-4e4e-beff-aaac7f117710\") " pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:20 crc kubenswrapper[4767]: I0128 19:00:20.076809 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a94f804a-cb4a-4e4e-beff-aaac7f117710-utilities\") pod \"certified-operators-4srtl\" (UID: \"a94f804a-cb4a-4e4e-beff-aaac7f117710\") " pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:20 crc kubenswrapper[4767]: I0128 19:00:20.179309 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2sg46\" (UniqueName: \"kubernetes.io/projected/a94f804a-cb4a-4e4e-beff-aaac7f117710-kube-api-access-2sg46\") pod \"certified-operators-4srtl\" (UID: \"a94f804a-cb4a-4e4e-beff-aaac7f117710\") " pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:20 crc kubenswrapper[4767]: I0128 19:00:20.179442 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a94f804a-cb4a-4e4e-beff-aaac7f117710-catalog-content\") pod \"certified-operators-4srtl\" (UID: \"a94f804a-cb4a-4e4e-beff-aaac7f117710\") " pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:20 crc kubenswrapper[4767]: I0128 19:00:20.179467 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a94f804a-cb4a-4e4e-beff-aaac7f117710-utilities\") pod \"certified-operators-4srtl\" (UID: \"a94f804a-cb4a-4e4e-beff-aaac7f117710\") " pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:20 crc kubenswrapper[4767]: I0128 19:00:20.180097 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a94f804a-cb4a-4e4e-beff-aaac7f117710-catalog-content\") pod \"certified-operators-4srtl\" (UID: \"a94f804a-cb4a-4e4e-beff-aaac7f117710\") " pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:20 crc kubenswrapper[4767]: I0128 19:00:20.180126 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a94f804a-cb4a-4e4e-beff-aaac7f117710-utilities\") pod \"certified-operators-4srtl\" (UID: \"a94f804a-cb4a-4e4e-beff-aaac7f117710\") " pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:20 crc kubenswrapper[4767]: I0128 19:00:20.207730 4767 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-2sg46\" (UniqueName: \"kubernetes.io/projected/a94f804a-cb4a-4e4e-beff-aaac7f117710-kube-api-access-2sg46\") pod \"certified-operators-4srtl\" (UID: \"a94f804a-cb4a-4e4e-beff-aaac7f117710\") " pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:20 crc kubenswrapper[4767]: I0128 19:00:20.314921 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:20 crc kubenswrapper[4767]: I0128 19:00:20.873301 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-4srtl"] Jan 28 19:00:21 crc kubenswrapper[4767]: I0128 19:00:21.540463 4767 generic.go:334] "Generic (PLEG): container finished" podID="a94f804a-cb4a-4e4e-beff-aaac7f117710" containerID="b062e01c85fb348d66ebc4837b50882b2245c4f6ac94c4cbb6e41abbb414ed83" exitCode=0 Jan 28 19:00:21 crc kubenswrapper[4767]: I0128 19:00:21.540558 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4srtl" event={"ID":"a94f804a-cb4a-4e4e-beff-aaac7f117710","Type":"ContainerDied","Data":"b062e01c85fb348d66ebc4837b50882b2245c4f6ac94c4cbb6e41abbb414ed83"} Jan 28 19:00:21 crc kubenswrapper[4767]: I0128 19:00:21.540832 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4srtl" event={"ID":"a94f804a-cb4a-4e4e-beff-aaac7f117710","Type":"ContainerStarted","Data":"50985934048ebb9d87bd74d356534713154262cb41ca5bcb17dec44a9fe384cd"} Jan 28 19:00:22 crc kubenswrapper[4767]: I0128 19:00:22.557070 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4srtl" event={"ID":"a94f804a-cb4a-4e4e-beff-aaac7f117710","Type":"ContainerStarted","Data":"8d423cff1be6858f26ba96ac8039d9cc4c15fa7d18e366835bfb2bc3b8413887"} Jan 28 19:00:22 crc kubenswrapper[4767]: I0128 19:00:22.986265 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4dhrg"] Jan 28 19:00:22 crc kubenswrapper[4767]: I0128 19:00:22.989237 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:22 crc kubenswrapper[4767]: I0128 19:00:22.999536 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4dhrg"] Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.057236 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7191c158-a8ea-444e-bb84-1e6fd63d6538-utilities\") pod \"redhat-marketplace-4dhrg\" (UID: \"7191c158-a8ea-444e-bb84-1e6fd63d6538\") " pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.057389 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7191c158-a8ea-444e-bb84-1e6fd63d6538-catalog-content\") pod \"redhat-marketplace-4dhrg\" (UID: \"7191c158-a8ea-444e-bb84-1e6fd63d6538\") " pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.057420 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hpxp\" (UniqueName: \"kubernetes.io/projected/7191c158-a8ea-444e-bb84-1e6fd63d6538-kube-api-access-7hpxp\") pod \"redhat-marketplace-4dhrg\" (UID: \"7191c158-a8ea-444e-bb84-1e6fd63d6538\") " pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.159061 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hpxp\" (UniqueName: \"kubernetes.io/projected/7191c158-a8ea-444e-bb84-1e6fd63d6538-kube-api-access-7hpxp\") pod \"redhat-marketplace-4dhrg\" (UID: \"7191c158-a8ea-444e-bb84-1e6fd63d6538\") " pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.159351 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7191c158-a8ea-444e-bb84-1e6fd63d6538-utilities\") pod \"redhat-marketplace-4dhrg\" (UID: \"7191c158-a8ea-444e-bb84-1e6fd63d6538\") " pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.159410 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7191c158-a8ea-444e-bb84-1e6fd63d6538-catalog-content\") pod \"redhat-marketplace-4dhrg\" (UID: \"7191c158-a8ea-444e-bb84-1e6fd63d6538\") " pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.159931 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7191c158-a8ea-444e-bb84-1e6fd63d6538-utilities\") pod \"redhat-marketplace-4dhrg\" (UID: \"7191c158-a8ea-444e-bb84-1e6fd63d6538\") " pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.160046 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7191c158-a8ea-444e-bb84-1e6fd63d6538-catalog-content\") pod \"redhat-marketplace-4dhrg\" (UID: \"7191c158-a8ea-444e-bb84-1e6fd63d6538\") " pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.180825 4767 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-7hpxp\" (UniqueName: \"kubernetes.io/projected/7191c158-a8ea-444e-bb84-1e6fd63d6538-kube-api-access-7hpxp\") pod \"redhat-marketplace-4dhrg\" (UID: \"7191c158-a8ea-444e-bb84-1e6fd63d6538\") " pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.317717 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.569408 4767 generic.go:334] "Generic (PLEG): container finished" podID="a94f804a-cb4a-4e4e-beff-aaac7f117710" containerID="8d423cff1be6858f26ba96ac8039d9cc4c15fa7d18e366835bfb2bc3b8413887" exitCode=0 Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.569831 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4srtl" event={"ID":"a94f804a-cb4a-4e4e-beff-aaac7f117710","Type":"ContainerDied","Data":"8d423cff1be6858f26ba96ac8039d9cc4c15fa7d18e366835bfb2bc3b8413887"} Jan 28 19:00:23 crc kubenswrapper[4767]: I0128 19:00:23.666231 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4dhrg"] Jan 28 19:00:24 crc kubenswrapper[4767]: I0128 19:00:24.581999 4767 generic.go:334] "Generic (PLEG): container finished" podID="7191c158-a8ea-444e-bb84-1e6fd63d6538" containerID="fb0074cae68bce5170a3a66e1739caf2e88d29fb67497b8fb22a54d8bf0d70f4" exitCode=0 Jan 28 19:00:24 crc kubenswrapper[4767]: I0128 19:00:24.582056 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4dhrg" event={"ID":"7191c158-a8ea-444e-bb84-1e6fd63d6538","Type":"ContainerDied","Data":"fb0074cae68bce5170a3a66e1739caf2e88d29fb67497b8fb22a54d8bf0d70f4"} Jan 28 19:00:24 crc kubenswrapper[4767]: I0128 19:00:24.582500 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4dhrg" event={"ID":"7191c158-a8ea-444e-bb84-1e6fd63d6538","Type":"ContainerStarted","Data":"064e313b3030908078e122e090b5f1e690ce49220a5521810596bce788b63fd3"} Jan 28 19:00:24 crc kubenswrapper[4767]: I0128 19:00:24.586534 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4srtl" event={"ID":"a94f804a-cb4a-4e4e-beff-aaac7f117710","Type":"ContainerStarted","Data":"e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1"} Jan 28 19:00:26 crc kubenswrapper[4767]: I0128 19:00:26.619870 4767 generic.go:334] "Generic (PLEG): container finished" podID="7191c158-a8ea-444e-bb84-1e6fd63d6538" containerID="4ce0cfdd739e399f43102068840783bf0d603419f08f5d09b0d846d0b403249a" exitCode=0 Jan 28 19:00:26 crc kubenswrapper[4767]: I0128 19:00:26.619980 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4dhrg" event={"ID":"7191c158-a8ea-444e-bb84-1e6fd63d6538","Type":"ContainerDied","Data":"4ce0cfdd739e399f43102068840783bf0d603419f08f5d09b0d846d0b403249a"} Jan 28 19:00:26 crc kubenswrapper[4767]: I0128 19:00:26.651004 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-4srtl" podStartSLOduration=4.947455104 podStartE2EDuration="7.650983842s" podCreationTimestamp="2026-01-28 19:00:19 +0000 UTC" firstStartedPulling="2026-01-28 19:00:21.542694039 +0000 UTC m=+1827.506876913" lastFinishedPulling="2026-01-28 19:00:24.246222777 +0000 UTC m=+1830.210405651" observedRunningTime="2026-01-28 19:00:24.646576117 +0000 UTC 
m=+1830.610759011" watchObservedRunningTime="2026-01-28 19:00:26.650983842 +0000 UTC m=+1832.615166716" Jan 28 19:00:27 crc kubenswrapper[4767]: I0128 19:00:27.632391 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4dhrg" event={"ID":"7191c158-a8ea-444e-bb84-1e6fd63d6538","Type":"ContainerStarted","Data":"57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0"} Jan 28 19:00:27 crc kubenswrapper[4767]: I0128 19:00:27.656518 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4dhrg" podStartSLOduration=3.221516376 podStartE2EDuration="5.656497608s" podCreationTimestamp="2026-01-28 19:00:22 +0000 UTC" firstStartedPulling="2026-01-28 19:00:24.583996465 +0000 UTC m=+1830.548179339" lastFinishedPulling="2026-01-28 19:00:27.018977707 +0000 UTC m=+1832.983160571" observedRunningTime="2026-01-28 19:00:27.649691005 +0000 UTC m=+1833.613873879" watchObservedRunningTime="2026-01-28 19:00:27.656497608 +0000 UTC m=+1833.620680492" Jan 28 19:00:27 crc kubenswrapper[4767]: I0128 19:00:27.796368 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 19:00:27 crc kubenswrapper[4767]: E0128 19:00:27.796728 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:00:30 crc kubenswrapper[4767]: I0128 19:00:30.315977 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:30 crc kubenswrapper[4767]: I0128 19:00:30.316784 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:30 crc kubenswrapper[4767]: I0128 19:00:30.369290 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:30 crc kubenswrapper[4767]: I0128 19:00:30.732553 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:33 crc kubenswrapper[4767]: I0128 19:00:33.318222 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:33 crc kubenswrapper[4767]: I0128 19:00:33.318969 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:33 crc kubenswrapper[4767]: I0128 19:00:33.378372 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:33 crc kubenswrapper[4767]: I0128 19:00:33.785273 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:33 crc kubenswrapper[4767]: I0128 19:00:33.982849 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4srtl"] Jan 28 19:00:33 crc kubenswrapper[4767]: I0128 19:00:33.983874 4767 kuberuntime_container.go:808] "Killing container with a grace 
period" pod="openshift-marketplace/certified-operators-4srtl" podUID="a94f804a-cb4a-4e4e-beff-aaac7f117710" containerName="registry-server" containerID="cri-o://e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1" gracePeriod=2 Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.470859 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.538249 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2sg46\" (UniqueName: \"kubernetes.io/projected/a94f804a-cb4a-4e4e-beff-aaac7f117710-kube-api-access-2sg46\") pod \"a94f804a-cb4a-4e4e-beff-aaac7f117710\" (UID: \"a94f804a-cb4a-4e4e-beff-aaac7f117710\") " Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.538494 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a94f804a-cb4a-4e4e-beff-aaac7f117710-catalog-content\") pod \"a94f804a-cb4a-4e4e-beff-aaac7f117710\" (UID: \"a94f804a-cb4a-4e4e-beff-aaac7f117710\") " Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.538533 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a94f804a-cb4a-4e4e-beff-aaac7f117710-utilities\") pod \"a94f804a-cb4a-4e4e-beff-aaac7f117710\" (UID: \"a94f804a-cb4a-4e4e-beff-aaac7f117710\") " Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.539679 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a94f804a-cb4a-4e4e-beff-aaac7f117710-utilities" (OuterVolumeSpecName: "utilities") pod "a94f804a-cb4a-4e4e-beff-aaac7f117710" (UID: "a94f804a-cb4a-4e4e-beff-aaac7f117710"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.540065 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a94f804a-cb4a-4e4e-beff-aaac7f117710-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.579581 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a94f804a-cb4a-4e4e-beff-aaac7f117710-kube-api-access-2sg46" (OuterVolumeSpecName: "kube-api-access-2sg46") pod "a94f804a-cb4a-4e4e-beff-aaac7f117710" (UID: "a94f804a-cb4a-4e4e-beff-aaac7f117710"). InnerVolumeSpecName "kube-api-access-2sg46". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.641798 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2sg46\" (UniqueName: \"kubernetes.io/projected/a94f804a-cb4a-4e4e-beff-aaac7f117710-kube-api-access-2sg46\") on node \"crc\" DevicePath \"\"" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.666528 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a94f804a-cb4a-4e4e-beff-aaac7f117710-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a94f804a-cb4a-4e4e-beff-aaac7f117710" (UID: "a94f804a-cb4a-4e4e-beff-aaac7f117710"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.728267 4767 generic.go:334] "Generic (PLEG): container finished" podID="a94f804a-cb4a-4e4e-beff-aaac7f117710" containerID="e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1" exitCode=0 Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.728348 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-4srtl" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.728424 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4srtl" event={"ID":"a94f804a-cb4a-4e4e-beff-aaac7f117710","Type":"ContainerDied","Data":"e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1"} Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.728460 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-4srtl" event={"ID":"a94f804a-cb4a-4e4e-beff-aaac7f117710","Type":"ContainerDied","Data":"50985934048ebb9d87bd74d356534713154262cb41ca5bcb17dec44a9fe384cd"} Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.728488 4767 scope.go:117] "RemoveContainer" containerID="e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.743948 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a94f804a-cb4a-4e4e-beff-aaac7f117710-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.759135 4767 scope.go:117] "RemoveContainer" containerID="8d423cff1be6858f26ba96ac8039d9cc4c15fa7d18e366835bfb2bc3b8413887" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.770911 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-4srtl"] Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.784673 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-4srtl"] Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.806827 4767 scope.go:117] "RemoveContainer" containerID="b062e01c85fb348d66ebc4837b50882b2245c4f6ac94c4cbb6e41abbb414ed83" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.812799 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a94f804a-cb4a-4e4e-beff-aaac7f117710" path="/var/lib/kubelet/pods/a94f804a-cb4a-4e4e-beff-aaac7f117710/volumes" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.834417 4767 scope.go:117] "RemoveContainer" containerID="e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1" Jan 28 19:00:34 crc kubenswrapper[4767]: E0128 19:00:34.834984 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1\": container with ID starting with e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1 not found: ID does not exist" containerID="e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.835016 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1"} err="failed to get container status \"e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1\": rpc error: code = 
NotFound desc = could not find container \"e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1\": container with ID starting with e889155612262173c08ef5be9d1e8650522621f525e24f3fb8deeae9af3c52e1 not found: ID does not exist" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.835041 4767 scope.go:117] "RemoveContainer" containerID="8d423cff1be6858f26ba96ac8039d9cc4c15fa7d18e366835bfb2bc3b8413887" Jan 28 19:00:34 crc kubenswrapper[4767]: E0128 19:00:34.835602 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d423cff1be6858f26ba96ac8039d9cc4c15fa7d18e366835bfb2bc3b8413887\": container with ID starting with 8d423cff1be6858f26ba96ac8039d9cc4c15fa7d18e366835bfb2bc3b8413887 not found: ID does not exist" containerID="8d423cff1be6858f26ba96ac8039d9cc4c15fa7d18e366835bfb2bc3b8413887" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.835624 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d423cff1be6858f26ba96ac8039d9cc4c15fa7d18e366835bfb2bc3b8413887"} err="failed to get container status \"8d423cff1be6858f26ba96ac8039d9cc4c15fa7d18e366835bfb2bc3b8413887\": rpc error: code = NotFound desc = could not find container \"8d423cff1be6858f26ba96ac8039d9cc4c15fa7d18e366835bfb2bc3b8413887\": container with ID starting with 8d423cff1be6858f26ba96ac8039d9cc4c15fa7d18e366835bfb2bc3b8413887 not found: ID does not exist" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.835641 4767 scope.go:117] "RemoveContainer" containerID="b062e01c85fb348d66ebc4837b50882b2245c4f6ac94c4cbb6e41abbb414ed83" Jan 28 19:00:34 crc kubenswrapper[4767]: E0128 19:00:34.835932 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b062e01c85fb348d66ebc4837b50882b2245c4f6ac94c4cbb6e41abbb414ed83\": container with ID starting with b062e01c85fb348d66ebc4837b50882b2245c4f6ac94c4cbb6e41abbb414ed83 not found: ID does not exist" containerID="b062e01c85fb348d66ebc4837b50882b2245c4f6ac94c4cbb6e41abbb414ed83" Jan 28 19:00:34 crc kubenswrapper[4767]: I0128 19:00:34.835953 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b062e01c85fb348d66ebc4837b50882b2245c4f6ac94c4cbb6e41abbb414ed83"} err="failed to get container status \"b062e01c85fb348d66ebc4837b50882b2245c4f6ac94c4cbb6e41abbb414ed83\": rpc error: code = NotFound desc = could not find container \"b062e01c85fb348d66ebc4837b50882b2245c4f6ac94c4cbb6e41abbb414ed83\": container with ID starting with b062e01c85fb348d66ebc4837b50882b2245c4f6ac94c4cbb6e41abbb414ed83 not found: ID does not exist" Jan 28 19:00:37 crc kubenswrapper[4767]: I0128 19:00:37.046033 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-q46gj"] Jan 28 19:00:37 crc kubenswrapper[4767]: I0128 19:00:37.061637 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-q46gj"] Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.184787 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4dhrg"] Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.185103 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4dhrg" podUID="7191c158-a8ea-444e-bb84-1e6fd63d6538" containerName="registry-server" containerID="cri-o://57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0" 
gracePeriod=2 Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.698512 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.774861 4767 generic.go:334] "Generic (PLEG): container finished" podID="7191c158-a8ea-444e-bb84-1e6fd63d6538" containerID="57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0" exitCode=0 Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.774919 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4dhrg" event={"ID":"7191c158-a8ea-444e-bb84-1e6fd63d6538","Type":"ContainerDied","Data":"57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0"} Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.774957 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4dhrg" event={"ID":"7191c158-a8ea-444e-bb84-1e6fd63d6538","Type":"ContainerDied","Data":"064e313b3030908078e122e090b5f1e690ce49220a5521810596bce788b63fd3"} Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.774982 4767 scope.go:117] "RemoveContainer" containerID="57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.775163 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4dhrg" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.796840 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 19:00:38 crc kubenswrapper[4767]: E0128 19:00:38.797623 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.803082 4767 scope.go:117] "RemoveContainer" containerID="4ce0cfdd739e399f43102068840783bf0d603419f08f5d09b0d846d0b403249a" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.810013 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b867495c-e01f-46a2-aa93-e42cd53d1b64" path="/var/lib/kubelet/pods/b867495c-e01f-46a2-aa93-e42cd53d1b64/volumes" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.832290 4767 scope.go:117] "RemoveContainer" containerID="fb0074cae68bce5170a3a66e1739caf2e88d29fb67497b8fb22a54d8bf0d70f4" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.837666 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hpxp\" (UniqueName: \"kubernetes.io/projected/7191c158-a8ea-444e-bb84-1e6fd63d6538-kube-api-access-7hpxp\") pod \"7191c158-a8ea-444e-bb84-1e6fd63d6538\" (UID: \"7191c158-a8ea-444e-bb84-1e6fd63d6538\") " Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.837869 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7191c158-a8ea-444e-bb84-1e6fd63d6538-catalog-content\") pod \"7191c158-a8ea-444e-bb84-1e6fd63d6538\" (UID: \"7191c158-a8ea-444e-bb84-1e6fd63d6538\") " Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.838157 
4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7191c158-a8ea-444e-bb84-1e6fd63d6538-utilities\") pod \"7191c158-a8ea-444e-bb84-1e6fd63d6538\" (UID: \"7191c158-a8ea-444e-bb84-1e6fd63d6538\") " Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.838958 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7191c158-a8ea-444e-bb84-1e6fd63d6538-utilities" (OuterVolumeSpecName: "utilities") pod "7191c158-a8ea-444e-bb84-1e6fd63d6538" (UID: "7191c158-a8ea-444e-bb84-1e6fd63d6538"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.846531 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7191c158-a8ea-444e-bb84-1e6fd63d6538-kube-api-access-7hpxp" (OuterVolumeSpecName: "kube-api-access-7hpxp") pod "7191c158-a8ea-444e-bb84-1e6fd63d6538" (UID: "7191c158-a8ea-444e-bb84-1e6fd63d6538"). InnerVolumeSpecName "kube-api-access-7hpxp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.860771 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7191c158-a8ea-444e-bb84-1e6fd63d6538-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7191c158-a8ea-444e-bb84-1e6fd63d6538" (UID: "7191c158-a8ea-444e-bb84-1e6fd63d6538"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.929247 4767 scope.go:117] "RemoveContainer" containerID="57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0" Jan 28 19:00:38 crc kubenswrapper[4767]: E0128 19:00:38.930390 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0\": container with ID starting with 57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0 not found: ID does not exist" containerID="57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.930430 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0"} err="failed to get container status \"57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0\": rpc error: code = NotFound desc = could not find container \"57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0\": container with ID starting with 57331cb35e4fbbae47ce4dedd35497158b24093fe320ced2f9a90f7ba9f450b0 not found: ID does not exist" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.930461 4767 scope.go:117] "RemoveContainer" containerID="4ce0cfdd739e399f43102068840783bf0d603419f08f5d09b0d846d0b403249a" Jan 28 19:00:38 crc kubenswrapper[4767]: E0128 19:00:38.931025 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ce0cfdd739e399f43102068840783bf0d603419f08f5d09b0d846d0b403249a\": container with ID starting with 4ce0cfdd739e399f43102068840783bf0d603419f08f5d09b0d846d0b403249a not found: ID does not exist" containerID="4ce0cfdd739e399f43102068840783bf0d603419f08f5d09b0d846d0b403249a" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 
19:00:38.931089 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ce0cfdd739e399f43102068840783bf0d603419f08f5d09b0d846d0b403249a"} err="failed to get container status \"4ce0cfdd739e399f43102068840783bf0d603419f08f5d09b0d846d0b403249a\": rpc error: code = NotFound desc = could not find container \"4ce0cfdd739e399f43102068840783bf0d603419f08f5d09b0d846d0b403249a\": container with ID starting with 4ce0cfdd739e399f43102068840783bf0d603419f08f5d09b0d846d0b403249a not found: ID does not exist" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.931149 4767 scope.go:117] "RemoveContainer" containerID="fb0074cae68bce5170a3a66e1739caf2e88d29fb67497b8fb22a54d8bf0d70f4" Jan 28 19:00:38 crc kubenswrapper[4767]: E0128 19:00:38.931829 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb0074cae68bce5170a3a66e1739caf2e88d29fb67497b8fb22a54d8bf0d70f4\": container with ID starting with fb0074cae68bce5170a3a66e1739caf2e88d29fb67497b8fb22a54d8bf0d70f4 not found: ID does not exist" containerID="fb0074cae68bce5170a3a66e1739caf2e88d29fb67497b8fb22a54d8bf0d70f4" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.931990 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb0074cae68bce5170a3a66e1739caf2e88d29fb67497b8fb22a54d8bf0d70f4"} err="failed to get container status \"fb0074cae68bce5170a3a66e1739caf2e88d29fb67497b8fb22a54d8bf0d70f4\": rpc error: code = NotFound desc = could not find container \"fb0074cae68bce5170a3a66e1739caf2e88d29fb67497b8fb22a54d8bf0d70f4\": container with ID starting with fb0074cae68bce5170a3a66e1739caf2e88d29fb67497b8fb22a54d8bf0d70f4 not found: ID does not exist" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.941704 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7191c158-a8ea-444e-bb84-1e6fd63d6538-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.941748 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hpxp\" (UniqueName: \"kubernetes.io/projected/7191c158-a8ea-444e-bb84-1e6fd63d6538-kube-api-access-7hpxp\") on node \"crc\" DevicePath \"\"" Jan 28 19:00:38 crc kubenswrapper[4767]: I0128 19:00:38.941762 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7191c158-a8ea-444e-bb84-1e6fd63d6538-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:00:39 crc kubenswrapper[4767]: I0128 19:00:39.121545 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4dhrg"] Jan 28 19:00:39 crc kubenswrapper[4767]: I0128 19:00:39.135593 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4dhrg"] Jan 28 19:00:40 crc kubenswrapper[4767]: I0128 19:00:40.807634 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7191c158-a8ea-444e-bb84-1e6fd63d6538" path="/var/lib/kubelet/pods/7191c158-a8ea-444e-bb84-1e6fd63d6538/volumes" Jan 28 19:00:46 crc kubenswrapper[4767]: I0128 19:00:46.038437 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-2v2sq"] Jan 28 19:00:46 crc kubenswrapper[4767]: I0128 19:00:46.048027 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-2v2sq"] Jan 28 19:00:46 crc kubenswrapper[4767]: I0128 
19:00:46.807325 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a803158d-90c2-492f-a92b-709d0e1f214e" path="/var/lib/kubelet/pods/a803158d-90c2-492f-a92b-709d0e1f214e/volumes" Jan 28 19:00:51 crc kubenswrapper[4767]: I0128 19:00:51.331937 4767 scope.go:117] "RemoveContainer" containerID="a7f007aaccbf51cdfdf73809d3699724db50b3a4abd709dfa57e68b47fb7fbdb" Jan 28 19:00:51 crc kubenswrapper[4767]: I0128 19:00:51.370661 4767 scope.go:117] "RemoveContainer" containerID="8717ec56a843de32925286ff588395d3fd7a138e783baf0d9c100f0227c136f2" Jan 28 19:00:51 crc kubenswrapper[4767]: I0128 19:00:51.437260 4767 scope.go:117] "RemoveContainer" containerID="e7aab6561aff3b78b34ec76883e3527deafa9656c91e2a33fc479da3798f2fd0" Jan 28 19:00:53 crc kubenswrapper[4767]: I0128 19:00:53.797607 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 19:00:53 crc kubenswrapper[4767]: E0128 19:00:53.798455 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.149754 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29493781-88g67"] Jan 28 19:01:00 crc kubenswrapper[4767]: E0128 19:01:00.152506 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7191c158-a8ea-444e-bb84-1e6fd63d6538" containerName="extract-content" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.152614 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="7191c158-a8ea-444e-bb84-1e6fd63d6538" containerName="extract-content" Jan 28 19:01:00 crc kubenswrapper[4767]: E0128 19:01:00.152715 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a94f804a-cb4a-4e4e-beff-aaac7f117710" containerName="extract-utilities" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.152797 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a94f804a-cb4a-4e4e-beff-aaac7f117710" containerName="extract-utilities" Jan 28 19:01:00 crc kubenswrapper[4767]: E0128 19:01:00.152877 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a94f804a-cb4a-4e4e-beff-aaac7f117710" containerName="registry-server" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.152954 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a94f804a-cb4a-4e4e-beff-aaac7f117710" containerName="registry-server" Jan 28 19:01:00 crc kubenswrapper[4767]: E0128 19:01:00.153040 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a94f804a-cb4a-4e4e-beff-aaac7f117710" containerName="extract-content" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.153117 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a94f804a-cb4a-4e4e-beff-aaac7f117710" containerName="extract-content" Jan 28 19:01:00 crc kubenswrapper[4767]: E0128 19:01:00.153236 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7191c158-a8ea-444e-bb84-1e6fd63d6538" containerName="extract-utilities" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.153330 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="7191c158-a8ea-444e-bb84-1e6fd63d6538" 
containerName="extract-utilities" Jan 28 19:01:00 crc kubenswrapper[4767]: E0128 19:01:00.153412 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7191c158-a8ea-444e-bb84-1e6fd63d6538" containerName="registry-server" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.153483 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="7191c158-a8ea-444e-bb84-1e6fd63d6538" containerName="registry-server" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.153787 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="7191c158-a8ea-444e-bb84-1e6fd63d6538" containerName="registry-server" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.153890 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a94f804a-cb4a-4e4e-beff-aaac7f117710" containerName="registry-server" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.155158 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.162080 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29493781-88g67"] Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.268180 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-combined-ca-bundle\") pod \"keystone-cron-29493781-88g67\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.268731 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-fernet-keys\") pod \"keystone-cron-29493781-88g67\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.268846 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tr795\" (UniqueName: \"kubernetes.io/projected/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-kube-api-access-tr795\") pod \"keystone-cron-29493781-88g67\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.268958 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-config-data\") pod \"keystone-cron-29493781-88g67\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.371367 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-config-data\") pod \"keystone-cron-29493781-88g67\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.371659 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-combined-ca-bundle\") pod \"keystone-cron-29493781-88g67\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " 
pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.371793 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-fernet-keys\") pod \"keystone-cron-29493781-88g67\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.371973 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tr795\" (UniqueName: \"kubernetes.io/projected/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-kube-api-access-tr795\") pod \"keystone-cron-29493781-88g67\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.379309 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-fernet-keys\") pod \"keystone-cron-29493781-88g67\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.381594 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-combined-ca-bundle\") pod \"keystone-cron-29493781-88g67\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.383168 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-config-data\") pod \"keystone-cron-29493781-88g67\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.403823 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tr795\" (UniqueName: \"kubernetes.io/projected/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-kube-api-access-tr795\") pod \"keystone-cron-29493781-88g67\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.486108 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:00 crc kubenswrapper[4767]: I0128 19:01:00.964070 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29493781-88g67"] Jan 28 19:01:01 crc kubenswrapper[4767]: I0128 19:01:01.043185 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493781-88g67" event={"ID":"ffabd2f8-aeec-4a2e-a4d7-317682cbe000","Type":"ContainerStarted","Data":"46867484219faa4d48fc40380d47606f873eb9b579c7075679b9ca352428902d"} Jan 28 19:01:02 crc kubenswrapper[4767]: I0128 19:01:02.055383 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493781-88g67" event={"ID":"ffabd2f8-aeec-4a2e-a4d7-317682cbe000","Type":"ContainerStarted","Data":"b8e5b0d1c730435d303787c6ecf382df6b4a72bfb81efbc8899ac53291dba5fa"} Jan 28 19:01:02 crc kubenswrapper[4767]: I0128 19:01:02.079634 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29493781-88g67" podStartSLOduration=2.0796019 podStartE2EDuration="2.0796019s" podCreationTimestamp="2026-01-28 19:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 19:01:02.075914895 +0000 UTC m=+1868.040097789" watchObservedRunningTime="2026-01-28 19:01:02.0796019 +0000 UTC m=+1868.043784774" Jan 28 19:01:04 crc kubenswrapper[4767]: I0128 19:01:04.078050 4767 generic.go:334] "Generic (PLEG): container finished" podID="ffabd2f8-aeec-4a2e-a4d7-317682cbe000" containerID="b8e5b0d1c730435d303787c6ecf382df6b4a72bfb81efbc8899ac53291dba5fa" exitCode=0 Jan 28 19:01:04 crc kubenswrapper[4767]: I0128 19:01:04.078154 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493781-88g67" event={"ID":"ffabd2f8-aeec-4a2e-a4d7-317682cbe000","Type":"ContainerDied","Data":"b8e5b0d1c730435d303787c6ecf382df6b4a72bfb81efbc8899ac53291dba5fa"} Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.458093 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.494467 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tr795\" (UniqueName: \"kubernetes.io/projected/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-kube-api-access-tr795\") pod \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.494596 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-fernet-keys\") pod \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.494729 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-combined-ca-bundle\") pod \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.494847 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-config-data\") pod \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\" (UID: \"ffabd2f8-aeec-4a2e-a4d7-317682cbe000\") " Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.510697 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-kube-api-access-tr795" (OuterVolumeSpecName: "kube-api-access-tr795") pod "ffabd2f8-aeec-4a2e-a4d7-317682cbe000" (UID: "ffabd2f8-aeec-4a2e-a4d7-317682cbe000"). InnerVolumeSpecName "kube-api-access-tr795". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.511471 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "ffabd2f8-aeec-4a2e-a4d7-317682cbe000" (UID: "ffabd2f8-aeec-4a2e-a4d7-317682cbe000"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.557934 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ffabd2f8-aeec-4a2e-a4d7-317682cbe000" (UID: "ffabd2f8-aeec-4a2e-a4d7-317682cbe000"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.580438 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-config-data" (OuterVolumeSpecName: "config-data") pod "ffabd2f8-aeec-4a2e-a4d7-317682cbe000" (UID: "ffabd2f8-aeec-4a2e-a4d7-317682cbe000"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.597702 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tr795\" (UniqueName: \"kubernetes.io/projected/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-kube-api-access-tr795\") on node \"crc\" DevicePath \"\"" Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.597749 4767 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.597761 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:01:05 crc kubenswrapper[4767]: I0128 19:01:05.597771 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ffabd2f8-aeec-4a2e-a4d7-317682cbe000-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 19:01:06 crc kubenswrapper[4767]: I0128 19:01:06.101683 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29493781-88g67" event={"ID":"ffabd2f8-aeec-4a2e-a4d7-317682cbe000","Type":"ContainerDied","Data":"46867484219faa4d48fc40380d47606f873eb9b579c7075679b9ca352428902d"} Jan 28 19:01:06 crc kubenswrapper[4767]: I0128 19:01:06.102241 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="46867484219faa4d48fc40380d47606f873eb9b579c7075679b9ca352428902d" Jan 28 19:01:06 crc kubenswrapper[4767]: I0128 19:01:06.101824 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29493781-88g67" Jan 28 19:01:07 crc kubenswrapper[4767]: I0128 19:01:07.052169 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-vm9r4"] Jan 28 19:01:07 crc kubenswrapper[4767]: I0128 19:01:07.067260 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-vm9r4"] Jan 28 19:01:08 crc kubenswrapper[4767]: I0128 19:01:08.796817 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 19:01:08 crc kubenswrapper[4767]: E0128 19:01:08.797181 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:01:08 crc kubenswrapper[4767]: I0128 19:01:08.811878 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28fde299-6f90-4c23-8a4f-15823bd8f4c5" path="/var/lib/kubelet/pods/28fde299-6f90-4c23-8a4f-15823bd8f4c5/volumes" Jan 28 19:01:11 crc kubenswrapper[4767]: I0128 19:01:11.037568 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-8bggl"] Jan 28 19:01:11 crc kubenswrapper[4767]: I0128 19:01:11.049245 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-8bggl"] Jan 28 19:01:12 crc kubenswrapper[4767]: I0128 19:01:12.032021 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-5nj7s"] 
Jan 28 19:01:12 crc kubenswrapper[4767]: I0128 19:01:12.041719 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-5nj7s"] Jan 28 19:01:12 crc kubenswrapper[4767]: I0128 19:01:12.808270 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="008911ac-269d-47a4-a624-0e789415d794" path="/var/lib/kubelet/pods/008911ac-269d-47a4-a624-0e789415d794/volumes" Jan 28 19:01:12 crc kubenswrapper[4767]: I0128 19:01:12.809421 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5" path="/var/lib/kubelet/pods/b16e2fd7-58e6-48cd-a3c2-b3d702ee11a5/volumes" Jan 28 19:01:20 crc kubenswrapper[4767]: I0128 19:01:20.038629 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/heat-db-sync-5ptf6"] Jan 28 19:01:20 crc kubenswrapper[4767]: I0128 19:01:20.050542 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/heat-db-sync-5ptf6"] Jan 28 19:01:20 crc kubenswrapper[4767]: I0128 19:01:20.796842 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 19:01:20 crc kubenswrapper[4767]: E0128 19:01:20.797197 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:01:20 crc kubenswrapper[4767]: I0128 19:01:20.815487 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b956870a-eae0-48fb-8e4f-182a9f276308" path="/var/lib/kubelet/pods/b956870a-eae0-48fb-8e4f-182a9f276308/volumes" Jan 28 19:01:22 crc kubenswrapper[4767]: I0128 19:01:22.256970 4767 generic.go:334] "Generic (PLEG): container finished" podID="1ebe359f-1b9c-4278-9c6c-5c72cf619080" containerID="8bdf59f4d73566049b327418e4c6ea99f56d4ac2c570d82fee13283669156ebd" exitCode=0 Jan 28 19:01:22 crc kubenswrapper[4767]: I0128 19:01:22.257079 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" event={"ID":"1ebe359f-1b9c-4278-9c6c-5c72cf619080","Type":"ContainerDied","Data":"8bdf59f4d73566049b327418e4c6ea99f56d4ac2c570d82fee13283669156ebd"} Jan 28 19:01:23 crc kubenswrapper[4767]: I0128 19:01:23.741601 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 19:01:23 crc kubenswrapper[4767]: I0128 19:01:23.913518 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnprx\" (UniqueName: \"kubernetes.io/projected/1ebe359f-1b9c-4278-9c6c-5c72cf619080-kube-api-access-wnprx\") pod \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\" (UID: \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\") " Jan 28 19:01:23 crc kubenswrapper[4767]: I0128 19:01:23.913785 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1ebe359f-1b9c-4278-9c6c-5c72cf619080-ssh-key-openstack-edpm-ipam\") pod \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\" (UID: \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\") " Jan 28 19:01:23 crc kubenswrapper[4767]: I0128 19:01:23.913909 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ebe359f-1b9c-4278-9c6c-5c72cf619080-inventory\") pod \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\" (UID: \"1ebe359f-1b9c-4278-9c6c-5c72cf619080\") " Jan 28 19:01:23 crc kubenswrapper[4767]: I0128 19:01:23.921741 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ebe359f-1b9c-4278-9c6c-5c72cf619080-kube-api-access-wnprx" (OuterVolumeSpecName: "kube-api-access-wnprx") pod "1ebe359f-1b9c-4278-9c6c-5c72cf619080" (UID: "1ebe359f-1b9c-4278-9c6c-5c72cf619080"). InnerVolumeSpecName "kube-api-access-wnprx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:01:23 crc kubenswrapper[4767]: I0128 19:01:23.945926 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ebe359f-1b9c-4278-9c6c-5c72cf619080-inventory" (OuterVolumeSpecName: "inventory") pod "1ebe359f-1b9c-4278-9c6c-5c72cf619080" (UID: "1ebe359f-1b9c-4278-9c6c-5c72cf619080"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:01:23 crc kubenswrapper[4767]: I0128 19:01:23.952052 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ebe359f-1b9c-4278-9c6c-5c72cf619080-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1ebe359f-1b9c-4278-9c6c-5c72cf619080" (UID: "1ebe359f-1b9c-4278-9c6c-5c72cf619080"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.016601 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1ebe359f-1b9c-4278-9c6c-5c72cf619080-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.016638 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1ebe359f-1b9c-4278-9c6c-5c72cf619080-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.016652 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnprx\" (UniqueName: \"kubernetes.io/projected/1ebe359f-1b9c-4278-9c6c-5c72cf619080-kube-api-access-wnprx\") on node \"crc\" DevicePath \"\"" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.285145 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" event={"ID":"1ebe359f-1b9c-4278-9c6c-5c72cf619080","Type":"ContainerDied","Data":"3bc38e7d047508385921ee832664905cab7cf41f5c16b0738310db33a140bb65"} Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.285576 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3bc38e7d047508385921ee832664905cab7cf41f5c16b0738310db33a140bb65" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.285255 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.391058 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q"] Jan 28 19:01:24 crc kubenswrapper[4767]: E0128 19:01:24.391692 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ffabd2f8-aeec-4a2e-a4d7-317682cbe000" containerName="keystone-cron" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.391716 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="ffabd2f8-aeec-4a2e-a4d7-317682cbe000" containerName="keystone-cron" Jan 28 19:01:24 crc kubenswrapper[4767]: E0128 19:01:24.391733 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ebe359f-1b9c-4278-9c6c-5c72cf619080" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.391744 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ebe359f-1b9c-4278-9c6c-5c72cf619080" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.391975 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ebe359f-1b9c-4278-9c6c-5c72cf619080" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.391996 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="ffabd2f8-aeec-4a2e-a4d7-317682cbe000" containerName="keystone-cron" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.392834 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.396731 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.397002 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.398272 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.403425 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q"] Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.467363 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.565919 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9slbn\" (UniqueName: \"kubernetes.io/projected/272b34a9-db41-4ec5-ab16-10a08a84bd34-kube-api-access-9slbn\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q\" (UID: \"272b34a9-db41-4ec5-ab16-10a08a84bd34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.566048 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/272b34a9-db41-4ec5-ab16-10a08a84bd34-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q\" (UID: \"272b34a9-db41-4ec5-ab16-10a08a84bd34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.566275 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/272b34a9-db41-4ec5-ab16-10a08a84bd34-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q\" (UID: \"272b34a9-db41-4ec5-ab16-10a08a84bd34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.668483 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/272b34a9-db41-4ec5-ab16-10a08a84bd34-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q\" (UID: \"272b34a9-db41-4ec5-ab16-10a08a84bd34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.668602 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9slbn\" (UniqueName: \"kubernetes.io/projected/272b34a9-db41-4ec5-ab16-10a08a84bd34-kube-api-access-9slbn\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q\" (UID: \"272b34a9-db41-4ec5-ab16-10a08a84bd34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.668687 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" 
(UniqueName: \"kubernetes.io/secret/272b34a9-db41-4ec5-ab16-10a08a84bd34-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q\" (UID: \"272b34a9-db41-4ec5-ab16-10a08a84bd34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.674380 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/272b34a9-db41-4ec5-ab16-10a08a84bd34-ssh-key-openstack-edpm-ipam\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q\" (UID: \"272b34a9-db41-4ec5-ab16-10a08a84bd34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.674857 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/272b34a9-db41-4ec5-ab16-10a08a84bd34-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q\" (UID: \"272b34a9-db41-4ec5-ab16-10a08a84bd34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.689344 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9slbn\" (UniqueName: \"kubernetes.io/projected/272b34a9-db41-4ec5-ab16-10a08a84bd34-kube-api-access-9slbn\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q\" (UID: \"272b34a9-db41-4ec5-ab16-10a08a84bd34\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:01:24 crc kubenswrapper[4767]: I0128 19:01:24.786393 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:01:25 crc kubenswrapper[4767]: I0128 19:01:25.346859 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q"] Jan 28 19:01:26 crc kubenswrapper[4767]: I0128 19:01:26.310141 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" event={"ID":"272b34a9-db41-4ec5-ab16-10a08a84bd34","Type":"ContainerStarted","Data":"21ac2c5aee8b1719571861c2b9edb4a11b0ffdd98045c9e48b92c12aa2b8853f"} Jan 28 19:01:26 crc kubenswrapper[4767]: I0128 19:01:26.311025 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" event={"ID":"272b34a9-db41-4ec5-ab16-10a08a84bd34","Type":"ContainerStarted","Data":"496cb00e6a3136fed9d55d500b9c621ac40953312e3162ce0efec653dc6300ef"} Jan 28 19:01:26 crc kubenswrapper[4767]: I0128 19:01:26.340699 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" podStartSLOduration=1.790499474 podStartE2EDuration="2.340668385s" podCreationTimestamp="2026-01-28 19:01:24 +0000 UTC" firstStartedPulling="2026-01-28 19:01:25.344651397 +0000 UTC m=+1891.308834281" lastFinishedPulling="2026-01-28 19:01:25.894820318 +0000 UTC m=+1891.859003192" observedRunningTime="2026-01-28 19:01:26.330820076 +0000 UTC m=+1892.295002950" watchObservedRunningTime="2026-01-28 19:01:26.340668385 +0000 UTC m=+1892.304851259" Jan 28 19:01:32 crc kubenswrapper[4767]: I0128 19:01:32.795715 4767 scope.go:117] "RemoveContainer" 
containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 19:01:32 crc kubenswrapper[4767]: E0128 19:01:32.796576 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:01:46 crc kubenswrapper[4767]: I0128 19:01:46.049709 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-kdhmb"] Jan 28 19:01:46 crc kubenswrapper[4767]: I0128 19:01:46.059786 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-kdhmb"] Jan 28 19:01:46 crc kubenswrapper[4767]: I0128 19:01:46.811543 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbd7dd91-c84a-442f-86af-c3a06ca9a373" path="/var/lib/kubelet/pods/cbd7dd91-c84a-442f-86af-c3a06ca9a373/volumes" Jan 28 19:01:47 crc kubenswrapper[4767]: I0128 19:01:47.795516 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152" Jan 28 19:01:48 crc kubenswrapper[4767]: I0128 19:01:48.551761 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"967b8a4723891c56d9bbc8957571ded4ea00629b76b3b712cd2b57f9a1382c6f"} Jan 28 19:01:51 crc kubenswrapper[4767]: I0128 19:01:51.612110 4767 scope.go:117] "RemoveContainer" containerID="9cb422e929b3359f7658bb7c5266ba58cce4381d8d29730cc33c98395f2b24b0" Jan 28 19:01:51 crc kubenswrapper[4767]: I0128 19:01:51.654175 4767 scope.go:117] "RemoveContainer" containerID="5694634af50740938137546ad08c9781975843d69d312ad15e40d7db6424d4a0" Jan 28 19:01:51 crc kubenswrapper[4767]: I0128 19:01:51.707626 4767 scope.go:117] "RemoveContainer" containerID="8fe6bb77e3b376735c42a645ecd669af4d9031c78aecf256f09b77a482fbe827" Jan 28 19:01:51 crc kubenswrapper[4767]: I0128 19:01:51.768314 4767 scope.go:117] "RemoveContainer" containerID="305774a65ec9e3bddc549073e9797ae194a1589e90e26f371c30f463727655cc" Jan 28 19:01:51 crc kubenswrapper[4767]: I0128 19:01:51.840226 4767 scope.go:117] "RemoveContainer" containerID="d46fa9f61d3a844beeae98cfaa9a563690b5a6876b170d769db6eec982b81858" Jan 28 19:02:09 crc kubenswrapper[4767]: I0128 19:02:09.056459 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-8zgk9"] Jan 28 19:02:09 crc kubenswrapper[4767]: I0128 19:02:09.067446 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-5p9bh"] Jan 28 19:02:09 crc kubenswrapper[4767]: I0128 19:02:09.079256 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-pfccx"] Jan 28 19:02:09 crc kubenswrapper[4767]: I0128 19:02:09.089661 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-8zgk9"] Jan 28 19:02:09 crc kubenswrapper[4767]: I0128 19:02:09.098287 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-pfccx"] Jan 28 19:02:09 crc kubenswrapper[4767]: I0128 19:02:09.106611 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-5p9bh"] Jan 28 19:02:10 crc 
Jan 28 19:02:10 crc kubenswrapper[4767]: I0128 19:02:10.033512 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-f207-account-create-update-4rckz"]
Jan 28 19:02:10 crc kubenswrapper[4767]: I0128 19:02:10.045412 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-f207-account-create-update-4rckz"]
Jan 28 19:02:10 crc kubenswrapper[4767]: I0128 19:02:10.810570 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b1cdbe43-1ad1-46dd-9349-b7322d4068a0" path="/var/lib/kubelet/pods/b1cdbe43-1ad1-46dd-9349-b7322d4068a0/volumes"
Jan 28 19:02:10 crc kubenswrapper[4767]: I0128 19:02:10.813109 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c" path="/var/lib/kubelet/pods/c90e2e5f-5ae5-4cc8-8169-a931a35f2d9c/volumes"
Jan 28 19:02:10 crc kubenswrapper[4767]: I0128 19:02:10.815873 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e93b8328-4f9c-47da-8af8-c86f5acf443b" path="/var/lib/kubelet/pods/e93b8328-4f9c-47da-8af8-c86f5acf443b/volumes"
Jan 28 19:02:10 crc kubenswrapper[4767]: I0128 19:02:10.816747 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4fecf89-74fc-47bd-83ff-876cc16e8dc8" path="/var/lib/kubelet/pods/f4fecf89-74fc-47bd-83ff-876cc16e8dc8/volumes"
Jan 28 19:02:11 crc kubenswrapper[4767]: I0128 19:02:11.050388 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-6d16-account-create-update-4r74q"]
Jan 28 19:02:11 crc kubenswrapper[4767]: I0128 19:02:11.065613 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-2cb5-account-create-update-tccrf"]
Jan 28 19:02:11 crc kubenswrapper[4767]: I0128 19:02:11.079020 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-6d16-account-create-update-4r74q"]
Jan 28 19:02:11 crc kubenswrapper[4767]: I0128 19:02:11.101951 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-2cb5-account-create-update-tccrf"]
Jan 28 19:02:12 crc kubenswrapper[4767]: I0128 19:02:12.807515 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10bc51c5-1acb-4d5f-9fde-fe1b17388f51" path="/var/lib/kubelet/pods/10bc51c5-1acb-4d5f-9fde-fe1b17388f51/volumes"
Jan 28 19:02:12 crc kubenswrapper[4767]: I0128 19:02:12.808971 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a673b46b-2808-4772-963d-7f2ae90be2fe" path="/var/lib/kubelet/pods/a673b46b-2808-4772-963d-7f2ae90be2fe/volumes"
Jan 28 19:02:40 crc kubenswrapper[4767]: I0128 19:02:40.117163 4767 generic.go:334] "Generic (PLEG): container finished" podID="272b34a9-db41-4ec5-ab16-10a08a84bd34" containerID="21ac2c5aee8b1719571861c2b9edb4a11b0ffdd98045c9e48b92c12aa2b8853f" exitCode=0
Jan 28 19:02:40 crc kubenswrapper[4767]: I0128 19:02:40.117400 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" event={"ID":"272b34a9-db41-4ec5-ab16-10a08a84bd34","Type":"ContainerDied","Data":"21ac2c5aee8b1719571861c2b9edb4a11b0ffdd98045c9e48b92c12aa2b8853f"}
Jan 28 19:02:41 crc kubenswrapper[4767]: I0128 19:02:41.596967 4767 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:02:41 crc kubenswrapper[4767]: I0128 19:02:41.737870 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/272b34a9-db41-4ec5-ab16-10a08a84bd34-ssh-key-openstack-edpm-ipam\") pod \"272b34a9-db41-4ec5-ab16-10a08a84bd34\" (UID: \"272b34a9-db41-4ec5-ab16-10a08a84bd34\") " Jan 28 19:02:41 crc kubenswrapper[4767]: I0128 19:02:41.737978 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9slbn\" (UniqueName: \"kubernetes.io/projected/272b34a9-db41-4ec5-ab16-10a08a84bd34-kube-api-access-9slbn\") pod \"272b34a9-db41-4ec5-ab16-10a08a84bd34\" (UID: \"272b34a9-db41-4ec5-ab16-10a08a84bd34\") " Jan 28 19:02:41 crc kubenswrapper[4767]: I0128 19:02:41.738008 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/272b34a9-db41-4ec5-ab16-10a08a84bd34-inventory\") pod \"272b34a9-db41-4ec5-ab16-10a08a84bd34\" (UID: \"272b34a9-db41-4ec5-ab16-10a08a84bd34\") " Jan 28 19:02:41 crc kubenswrapper[4767]: I0128 19:02:41.785671 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/272b34a9-db41-4ec5-ab16-10a08a84bd34-kube-api-access-9slbn" (OuterVolumeSpecName: "kube-api-access-9slbn") pod "272b34a9-db41-4ec5-ab16-10a08a84bd34" (UID: "272b34a9-db41-4ec5-ab16-10a08a84bd34"). InnerVolumeSpecName "kube-api-access-9slbn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:02:41 crc kubenswrapper[4767]: I0128 19:02:41.792109 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/272b34a9-db41-4ec5-ab16-10a08a84bd34-inventory" (OuterVolumeSpecName: "inventory") pod "272b34a9-db41-4ec5-ab16-10a08a84bd34" (UID: "272b34a9-db41-4ec5-ab16-10a08a84bd34"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:02:41 crc kubenswrapper[4767]: I0128 19:02:41.793314 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/272b34a9-db41-4ec5-ab16-10a08a84bd34-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "272b34a9-db41-4ec5-ab16-10a08a84bd34" (UID: "272b34a9-db41-4ec5-ab16-10a08a84bd34"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:02:41 crc kubenswrapper[4767]: I0128 19:02:41.840886 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/272b34a9-db41-4ec5-ab16-10a08a84bd34-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 19:02:41 crc kubenswrapper[4767]: I0128 19:02:41.840943 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9slbn\" (UniqueName: \"kubernetes.io/projected/272b34a9-db41-4ec5-ab16-10a08a84bd34-kube-api-access-9slbn\") on node \"crc\" DevicePath \"\"" Jan 28 19:02:41 crc kubenswrapper[4767]: I0128 19:02:41.840957 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/272b34a9-db41-4ec5-ab16-10a08a84bd34-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.144579 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" event={"ID":"272b34a9-db41-4ec5-ab16-10a08a84bd34","Type":"ContainerDied","Data":"496cb00e6a3136fed9d55d500b9c621ac40953312e3162ce0efec653dc6300ef"} Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.144637 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="496cb00e6a3136fed9d55d500b9c621ac40953312e3162ce0efec653dc6300ef" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.144679 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.258358 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl"] Jan 28 19:02:42 crc kubenswrapper[4767]: E0128 19:02:42.259188 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="272b34a9-db41-4ec5-ab16-10a08a84bd34" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.259262 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="272b34a9-db41-4ec5-ab16-10a08a84bd34" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.259551 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="272b34a9-db41-4ec5-ab16-10a08a84bd34" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.260343 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.263136 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.263456 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.264302 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.264581 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.273750 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl"] Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.454756 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e37ab6f3-785e-4437-b1e3-8e5316868389-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl\" (UID: \"e37ab6f3-785e-4437-b1e3-8e5316868389\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.454884 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2vcnq\" (UniqueName: \"kubernetes.io/projected/e37ab6f3-785e-4437-b1e3-8e5316868389-kube-api-access-2vcnq\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl\" (UID: \"e37ab6f3-785e-4437-b1e3-8e5316868389\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.454957 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e37ab6f3-785e-4437-b1e3-8e5316868389-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl\" (UID: \"e37ab6f3-785e-4437-b1e3-8e5316868389\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.557312 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e37ab6f3-785e-4437-b1e3-8e5316868389-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl\" (UID: \"e37ab6f3-785e-4437-b1e3-8e5316868389\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.557504 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e37ab6f3-785e-4437-b1e3-8e5316868389-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl\" (UID: \"e37ab6f3-785e-4437-b1e3-8e5316868389\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.557547 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2vcnq\" (UniqueName: 
\"kubernetes.io/projected/e37ab6f3-785e-4437-b1e3-8e5316868389-kube-api-access-2vcnq\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl\" (UID: \"e37ab6f3-785e-4437-b1e3-8e5316868389\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.566041 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e37ab6f3-785e-4437-b1e3-8e5316868389-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl\" (UID: \"e37ab6f3-785e-4437-b1e3-8e5316868389\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.567142 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e37ab6f3-785e-4437-b1e3-8e5316868389-ssh-key-openstack-edpm-ipam\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl\" (UID: \"e37ab6f3-785e-4437-b1e3-8e5316868389\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.579776 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2vcnq\" (UniqueName: \"kubernetes.io/projected/e37ab6f3-785e-4437-b1e3-8e5316868389-kube-api-access-2vcnq\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl\" (UID: \"e37ab6f3-785e-4437-b1e3-8e5316868389\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" Jan 28 19:02:42 crc kubenswrapper[4767]: I0128 19:02:42.581491 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" Jan 28 19:02:43 crc kubenswrapper[4767]: I0128 19:02:43.132643 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl"] Jan 28 19:02:43 crc kubenswrapper[4767]: I0128 19:02:43.161997 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" event={"ID":"e37ab6f3-785e-4437-b1e3-8e5316868389","Type":"ContainerStarted","Data":"49ca2c02c961d396a5dd5fed1d022c0202c2e29506c0e8527635a468d2e501e8"} Jan 28 19:02:45 crc kubenswrapper[4767]: I0128 19:02:45.188428 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" event={"ID":"e37ab6f3-785e-4437-b1e3-8e5316868389","Type":"ContainerStarted","Data":"f3bbd3e6a10990efdfdea8e960efa55ec8ecf929968573553c023f3ac192742d"} Jan 28 19:02:49 crc kubenswrapper[4767]: I0128 19:02:49.042781 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" podStartSLOduration=6.021161026 podStartE2EDuration="7.042761577s" podCreationTimestamp="2026-01-28 19:02:42 +0000 UTC" firstStartedPulling="2026-01-28 19:02:43.146222575 +0000 UTC m=+1969.110405449" lastFinishedPulling="2026-01-28 19:02:44.167823116 +0000 UTC m=+1970.132006000" observedRunningTime="2026-01-28 19:02:45.215593997 +0000 UTC m=+1971.179776881" watchObservedRunningTime="2026-01-28 19:02:49.042761577 +0000 UTC m=+1975.006944451" Jan 28 19:02:49 crc kubenswrapper[4767]: I0128 19:02:49.052866 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2lrw9"] Jan 
Jan 28 19:02:49 crc kubenswrapper[4767]: I0128 19:02:49.066097 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-2lrw9"]
Jan 28 19:02:49 crc kubenswrapper[4767]: I0128 19:02:49.229456 4767 generic.go:334] "Generic (PLEG): container finished" podID="e37ab6f3-785e-4437-b1e3-8e5316868389" containerID="f3bbd3e6a10990efdfdea8e960efa55ec8ecf929968573553c023f3ac192742d" exitCode=0
Jan 28 19:02:49 crc kubenswrapper[4767]: I0128 19:02:49.229556 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" event={"ID":"e37ab6f3-785e-4437-b1e3-8e5316868389","Type":"ContainerDied","Data":"f3bbd3e6a10990efdfdea8e960efa55ec8ecf929968573553c023f3ac192742d"}
Jan 28 19:02:50 crc kubenswrapper[4767]: I0128 19:02:50.700813 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl"
Jan 28 19:02:50 crc kubenswrapper[4767]: I0128 19:02:50.806421 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb607654-d519-4a0c-bd79-337c1340c237" path="/var/lib/kubelet/pods/cb607654-d519-4a0c-bd79-337c1340c237/volumes"
Jan 28 19:02:50 crc kubenswrapper[4767]: I0128 19:02:50.854694 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e37ab6f3-785e-4437-b1e3-8e5316868389-ssh-key-openstack-edpm-ipam\") pod \"e37ab6f3-785e-4437-b1e3-8e5316868389\" (UID: \"e37ab6f3-785e-4437-b1e3-8e5316868389\") "
Jan 28 19:02:50 crc kubenswrapper[4767]: I0128 19:02:50.854883 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2vcnq\" (UniqueName: \"kubernetes.io/projected/e37ab6f3-785e-4437-b1e3-8e5316868389-kube-api-access-2vcnq\") pod \"e37ab6f3-785e-4437-b1e3-8e5316868389\" (UID: \"e37ab6f3-785e-4437-b1e3-8e5316868389\") "
Jan 28 19:02:50 crc kubenswrapper[4767]: I0128 19:02:50.855121 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e37ab6f3-785e-4437-b1e3-8e5316868389-inventory\") pod \"e37ab6f3-785e-4437-b1e3-8e5316868389\" (UID: \"e37ab6f3-785e-4437-b1e3-8e5316868389\") "
Jan 28 19:02:50 crc kubenswrapper[4767]: I0128 19:02:50.862729 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e37ab6f3-785e-4437-b1e3-8e5316868389-kube-api-access-2vcnq" (OuterVolumeSpecName: "kube-api-access-2vcnq") pod "e37ab6f3-785e-4437-b1e3-8e5316868389" (UID: "e37ab6f3-785e-4437-b1e3-8e5316868389"). InnerVolumeSpecName "kube-api-access-2vcnq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:02:50 crc kubenswrapper[4767]: I0128 19:02:50.889391 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e37ab6f3-785e-4437-b1e3-8e5316868389-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "e37ab6f3-785e-4437-b1e3-8e5316868389" (UID: "e37ab6f3-785e-4437-b1e3-8e5316868389"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam".
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:02:50 crc kubenswrapper[4767]: I0128 19:02:50.889925 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e37ab6f3-785e-4437-b1e3-8e5316868389-inventory" (OuterVolumeSpecName: "inventory") pod "e37ab6f3-785e-4437-b1e3-8e5316868389" (UID: "e37ab6f3-785e-4437-b1e3-8e5316868389"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:02:50 crc kubenswrapper[4767]: I0128 19:02:50.957095 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/e37ab6f3-785e-4437-b1e3-8e5316868389-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 19:02:50 crc kubenswrapper[4767]: I0128 19:02:50.957129 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2vcnq\" (UniqueName: \"kubernetes.io/projected/e37ab6f3-785e-4437-b1e3-8e5316868389-kube-api-access-2vcnq\") on node \"crc\" DevicePath \"\"" Jan 28 19:02:50 crc kubenswrapper[4767]: I0128 19:02:50.957141 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e37ab6f3-785e-4437-b1e3-8e5316868389-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.249020 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" event={"ID":"e37ab6f3-785e-4437-b1e3-8e5316868389","Type":"ContainerDied","Data":"49ca2c02c961d396a5dd5fed1d022c0202c2e29506c0e8527635a468d2e501e8"} Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.249070 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="49ca2c02c961d396a5dd5fed1d022c0202c2e29506c0e8527635a468d2e501e8" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.249101 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.331416 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf"] Jan 28 19:02:51 crc kubenswrapper[4767]: E0128 19:02:51.331988 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e37ab6f3-785e-4437-b1e3-8e5316868389" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.332012 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e37ab6f3-785e-4437-b1e3-8e5316868389" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.332195 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e37ab6f3-785e-4437-b1e3-8e5316868389" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.333049 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.336123 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.336398 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.340571 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.340864 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.348463 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf"] Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.469463 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b130c420-d8a7-4063-a098-4b16682078be-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sb8nf\" (UID: \"b130c420-d8a7-4063-a098-4b16682078be\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.469513 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjctv\" (UniqueName: \"kubernetes.io/projected/b130c420-d8a7-4063-a098-4b16682078be-kube-api-access-zjctv\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sb8nf\" (UID: \"b130c420-d8a7-4063-a098-4b16682078be\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.469556 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b130c420-d8a7-4063-a098-4b16682078be-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sb8nf\" (UID: \"b130c420-d8a7-4063-a098-4b16682078be\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.588478 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b130c420-d8a7-4063-a098-4b16682078be-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sb8nf\" (UID: \"b130c420-d8a7-4063-a098-4b16682078be\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.588593 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjctv\" (UniqueName: \"kubernetes.io/projected/b130c420-d8a7-4063-a098-4b16682078be-kube-api-access-zjctv\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sb8nf\" (UID: \"b130c420-d8a7-4063-a098-4b16682078be\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.588764 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b130c420-d8a7-4063-a098-4b16682078be-inventory\") pod 
\"install-os-edpm-deployment-openstack-edpm-ipam-sb8nf\" (UID: \"b130c420-d8a7-4063-a098-4b16682078be\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.596813 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b130c420-d8a7-4063-a098-4b16682078be-ssh-key-openstack-edpm-ipam\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sb8nf\" (UID: \"b130c420-d8a7-4063-a098-4b16682078be\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.599857 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b130c420-d8a7-4063-a098-4b16682078be-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sb8nf\" (UID: \"b130c420-d8a7-4063-a098-4b16682078be\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.610470 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjctv\" (UniqueName: \"kubernetes.io/projected/b130c420-d8a7-4063-a098-4b16682078be-kube-api-access-zjctv\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-sb8nf\" (UID: \"b130c420-d8a7-4063-a098-4b16682078be\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.694431 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" Jan 28 19:02:51 crc kubenswrapper[4767]: I0128 19:02:51.991551 4767 scope.go:117] "RemoveContainer" containerID="277b8c35a4f17e6299df17af71aedd63abd30e506e1d19b236ea96676bdbf6d4" Jan 28 19:02:52 crc kubenswrapper[4767]: I0128 19:02:52.017430 4767 scope.go:117] "RemoveContainer" containerID="2107bb8cf5993b9b2b5f431cdd50b5a489a1330c563c354cd74d146d1dc60585" Jan 28 19:02:52 crc kubenswrapper[4767]: I0128 19:02:52.071509 4767 scope.go:117] "RemoveContainer" containerID="c3a86a892dbd1b355c3a23b4a5db80f85ad44aec4988db387b065de363b1033e" Jan 28 19:02:52 crc kubenswrapper[4767]: I0128 19:02:52.155060 4767 scope.go:117] "RemoveContainer" containerID="192deb188367593ab30c3ab7eda6ea984473c509f676a8d58e0fef9ab2a99778" Jan 28 19:02:52 crc kubenswrapper[4767]: I0128 19:02:52.186793 4767 scope.go:117] "RemoveContainer" containerID="2ed2838a73d2aa52ba7b4d23ad3d4b5a43fc236a521102dd5e2365fefd270f79" Jan 28 19:02:52 crc kubenswrapper[4767]: I0128 19:02:52.214276 4767 scope.go:117] "RemoveContainer" containerID="228d3a5fb5a3dedcc6f264527a158ac4d4054c837492113ee1e0e32f3da46807" Jan 28 19:02:52 crc kubenswrapper[4767]: I0128 19:02:52.243524 4767 scope.go:117] "RemoveContainer" containerID="d4a63588f30cf808b3b7a706a1f6f6047a2f8b069244fec9d3362944c8e19a66" Jan 28 19:02:52 crc kubenswrapper[4767]: I0128 19:02:52.252556 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf"] Jan 28 19:02:53 crc kubenswrapper[4767]: I0128 19:02:53.282263 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" event={"ID":"b130c420-d8a7-4063-a098-4b16682078be","Type":"ContainerStarted","Data":"e61b174ccc88a32652267173c6aab570612a7ed46ab239629adf870802e65a74"} Jan 28 19:02:53 crc kubenswrapper[4767]: I0128 19:02:53.282704 4767 
Jan 28 19:02:53 crc kubenswrapper[4767]: I0128 19:02:53.305650 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" podStartSLOduration=1.739161845 podStartE2EDuration="2.305626477s" podCreationTimestamp="2026-01-28 19:02:51 +0000 UTC" firstStartedPulling="2026-01-28 19:02:52.275920921 +0000 UTC m=+1978.240103805" lastFinishedPulling="2026-01-28 19:02:52.842385563 +0000 UTC m=+1978.806568437" observedRunningTime="2026-01-28 19:02:53.299712881 +0000 UTC m=+1979.263895785" watchObservedRunningTime="2026-01-28 19:02:53.305626477 +0000 UTC m=+1979.269809361"
Jan 28 19:03:14 crc kubenswrapper[4767]: I0128 19:03:14.061289 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-8x72k"]
Jan 28 19:03:14 crc kubenswrapper[4767]: I0128 19:03:14.072140 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-8x72k"]
Jan 28 19:03:14 crc kubenswrapper[4767]: I0128 19:03:14.809352 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9f9d061-f0ef-4bb6-8b3c-82838fedc33f" path="/var/lib/kubelet/pods/f9f9d061-f0ef-4bb6-8b3c-82838fedc33f/volumes"
Jan 28 19:03:28 crc kubenswrapper[4767]: I0128 19:03:28.648852 4767 generic.go:334] "Generic (PLEG): container finished" podID="b130c420-d8a7-4063-a098-4b16682078be" containerID="e61b174ccc88a32652267173c6aab570612a7ed46ab239629adf870802e65a74" exitCode=0
Jan 28 19:03:28 crc kubenswrapper[4767]: I0128 19:03:28.648969 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" event={"ID":"b130c420-d8a7-4063-a098-4b16682078be","Type":"ContainerDied","Data":"e61b174ccc88a32652267173c6aab570612a7ed46ab239629adf870802e65a74"}
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.119327 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.169353 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b130c420-d8a7-4063-a098-4b16682078be-ssh-key-openstack-edpm-ipam\") pod \"b130c420-d8a7-4063-a098-4b16682078be\" (UID: \"b130c420-d8a7-4063-a098-4b16682078be\") "
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.169459 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zjctv\" (UniqueName: \"kubernetes.io/projected/b130c420-d8a7-4063-a098-4b16682078be-kube-api-access-zjctv\") pod \"b130c420-d8a7-4063-a098-4b16682078be\" (UID: \"b130c420-d8a7-4063-a098-4b16682078be\") "
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.169538 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b130c420-d8a7-4063-a098-4b16682078be-inventory\") pod \"b130c420-d8a7-4063-a098-4b16682078be\" (UID: \"b130c420-d8a7-4063-a098-4b16682078be\") "
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.175645 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b130c420-d8a7-4063-a098-4b16682078be-kube-api-access-zjctv" (OuterVolumeSpecName: "kube-api-access-zjctv") pod "b130c420-d8a7-4063-a098-4b16682078be" (UID: "b130c420-d8a7-4063-a098-4b16682078be"). InnerVolumeSpecName "kube-api-access-zjctv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.198309 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b130c420-d8a7-4063-a098-4b16682078be-inventory" (OuterVolumeSpecName: "inventory") pod "b130c420-d8a7-4063-a098-4b16682078be" (UID: "b130c420-d8a7-4063-a098-4b16682078be"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.202325 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b130c420-d8a7-4063-a098-4b16682078be-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "b130c420-d8a7-4063-a098-4b16682078be" (UID: "b130c420-d8a7-4063-a098-4b16682078be"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.271408 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zjctv\" (UniqueName: \"kubernetes.io/projected/b130c420-d8a7-4063-a098-4b16682078be-kube-api-access-zjctv\") on node \"crc\" DevicePath \"\""
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.271651 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b130c420-d8a7-4063-a098-4b16682078be-inventory\") on node \"crc\" DevicePath \"\""
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.271723 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b130c420-d8a7-4063-a098-4b16682078be-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.669303 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf" event={"ID":"b130c420-d8a7-4063-a098-4b16682078be","Type":"ContainerDied","Data":"a2d33885c944a812c00eb5b393ea45fbda6d29e0c5c09efcfdd920f6b66fff56"}
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.669583 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a2d33885c944a812c00eb5b393ea45fbda6d29e0c5c09efcfdd920f6b66fff56"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.669443 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-sb8nf"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.759709 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"]
Jan 28 19:03:30 crc kubenswrapper[4767]: E0128 19:03:30.760695 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b130c420-d8a7-4063-a098-4b16682078be" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.760780 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b130c420-d8a7-4063-a098-4b16682078be" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.761034 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b130c420-d8a7-4063-a098-4b16682078be" containerName="install-os-edpm-deployment-openstack-edpm-ipam"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.761799 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.764287 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.764852 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.765020 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.765024 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.769757 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"]
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.887191 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx\" (UID: \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.887274 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx\" (UID: \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.887479 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7d9lv\" (UniqueName: \"kubernetes.io/projected/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-kube-api-access-7d9lv\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx\" (UID: \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.989826 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7d9lv\" (UniqueName: \"kubernetes.io/projected/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-kube-api-access-7d9lv\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx\" (UID: \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.989971 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx\" (UID: \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.990003 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx\" (UID: \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.995082 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-ssh-key-openstack-edpm-ipam\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx\" (UID: \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:03:30 crc kubenswrapper[4767]: I0128 19:03:30.996672 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx\" (UID: \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:03:31 crc kubenswrapper[4767]: I0128 19:03:31.008994 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7d9lv\" (UniqueName: \"kubernetes.io/projected/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-kube-api-access-7d9lv\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx\" (UID: \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:03:31 crc kubenswrapper[4767]: I0128 19:03:31.090750 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:03:31 crc kubenswrapper[4767]: I0128 19:03:31.675670 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"]
Jan 28 19:03:32 crc kubenswrapper[4767]: I0128 19:03:32.710029 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx" event={"ID":"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8","Type":"ContainerStarted","Data":"e622e5ae800b48754b3a76f8f3ac7c1ea62277eea9bf34c0185e1e5c2aa28c90"}
Jan 28 19:03:32 crc kubenswrapper[4767]: I0128 19:03:32.710688 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx" event={"ID":"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8","Type":"ContainerStarted","Data":"7dc1472da62b0a056808a933e280e5e2e2d6aec1368eb3f3c40a04d779547757"}
Jan 28 19:03:32 crc kubenswrapper[4767]: I0128 19:03:32.738501 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx" podStartSLOduration=2.062370314 podStartE2EDuration="2.738467032s" podCreationTimestamp="2026-01-28 19:03:30 +0000 UTC" firstStartedPulling="2026-01-28 19:03:31.678367658 +0000 UTC m=+2017.642550532" lastFinishedPulling="2026-01-28 19:03:32.354464376 +0000 UTC m=+2018.318647250" observedRunningTime="2026-01-28 19:03:32.728790768 +0000 UTC m=+2018.692973642" watchObservedRunningTime="2026-01-28 19:03:32.738467032 +0000 UTC m=+2018.702649906"
Jan 28 19:03:45 crc kubenswrapper[4767]: I0128 19:03:45.054306 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-t8vsr"]
Jan 28 19:03:45 crc kubenswrapper[4767]: I0128 19:03:45.062549 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-t8vsr"]
Jan 28 19:03:46 crc kubenswrapper[4767]: I0128 19:03:46.810851 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd968858-329e-4d57-8cd7-364a5e852eea" path="/var/lib/kubelet/pods/bd968858-329e-4d57-8cd7-364a5e852eea/volumes"
Jan 28 19:03:52 crc kubenswrapper[4767]: I0128 19:03:52.410048 4767 scope.go:117] "RemoveContainer" containerID="e073f7a0342841c72e27008fe373a0b937c630b20a94bc7a967404b5f9f4202f"
Jan 28 19:03:52 crc kubenswrapper[4767]: I0128 19:03:52.469924 4767 scope.go:117] "RemoveContainer" containerID="b8eea234273b2b3910c136fb5ad82f22bb028b9f1b75f8ec921682db0abe37ed"
Jan 28 19:04:11 crc kubenswrapper[4767]: I0128 19:04:11.053196 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-6rtsh"]
Jan 28 19:04:11 crc kubenswrapper[4767]: I0128 19:04:11.063182 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-6rtsh"]
Jan 28 19:04:12 crc kubenswrapper[4767]: I0128 19:04:12.806946 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e74c600-b1ab-45a5-b4ea-171ceb03d9bf" path="/var/lib/kubelet/pods/9e74c600-b1ab-45a5-b4ea-171ceb03d9bf/volumes"
Jan 28 19:04:15 crc kubenswrapper[4767]: I0128 19:04:15.455121 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 19:04:15 crc kubenswrapper[4767]: I0128 19:04:15.455549 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 19:04:20 crc kubenswrapper[4767]: I0128 19:04:20.159057 4767 generic.go:334] "Generic (PLEG): container finished" podID="7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8" containerID="e622e5ae800b48754b3a76f8f3ac7c1ea62277eea9bf34c0185e1e5c2aa28c90" exitCode=0
Jan 28 19:04:20 crc kubenswrapper[4767]: I0128 19:04:20.159138 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx" event={"ID":"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8","Type":"ContainerDied","Data":"e622e5ae800b48754b3a76f8f3ac7c1ea62277eea9bf34c0185e1e5c2aa28c90"}
Jan 28 19:04:21 crc kubenswrapper[4767]: I0128 19:04:21.624013 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:04:21 crc kubenswrapper[4767]: I0128 19:04:21.710257 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-ssh-key-openstack-edpm-ipam\") pod \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\" (UID: \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\") "
Jan 28 19:04:21 crc kubenswrapper[4767]: I0128 19:04:21.711090 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-inventory\") pod \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\" (UID: \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\") "
Jan 28 19:04:21 crc kubenswrapper[4767]: I0128 19:04:21.711140 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7d9lv\" (UniqueName: \"kubernetes.io/projected/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-kube-api-access-7d9lv\") pod \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\" (UID: \"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8\") "
Jan 28 19:04:21 crc kubenswrapper[4767]: I0128 19:04:21.728670 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-kube-api-access-7d9lv" (OuterVolumeSpecName: "kube-api-access-7d9lv") pod "7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8" (UID: "7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8"). InnerVolumeSpecName "kube-api-access-7d9lv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:04:21 crc kubenswrapper[4767]: I0128 19:04:21.749660 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8" (UID: "7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:04:21 crc kubenswrapper[4767]: I0128 19:04:21.753431 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-inventory" (OuterVolumeSpecName: "inventory") pod "7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8" (UID: "7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:04:21 crc kubenswrapper[4767]: I0128 19:04:21.813666 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 28 19:04:21 crc kubenswrapper[4767]: I0128 19:04:21.813708 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-inventory\") on node \"crc\" DevicePath \"\""
Jan 28 19:04:21 crc kubenswrapper[4767]: I0128 19:04:21.813719 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7d9lv\" (UniqueName: \"kubernetes.io/projected/7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8-kube-api-access-7d9lv\") on node \"crc\" DevicePath \"\""
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.179903 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx" event={"ID":"7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8","Type":"ContainerDied","Data":"7dc1472da62b0a056808a933e280e5e2e2d6aec1368eb3f3c40a04d779547757"}
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.179959 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7dc1472da62b0a056808a933e280e5e2e2d6aec1368eb3f3c40a04d779547757"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.179973 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.290036 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-wqd2v"]
Jan 28 19:04:22 crc kubenswrapper[4767]: E0128 19:04:22.290564 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.290591 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.290766 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.291542 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.295389 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.295430 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.295771 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.296163 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.302667 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-wqd2v"]
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.427151 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/88a3dbaa-6957-40e2-ad04-32b9a2516a40-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-wqd2v\" (UID: \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\") " pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.427325 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/88a3dbaa-6957-40e2-ad04-32b9a2516a40-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-wqd2v\" (UID: \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\") " pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.427358 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lw66s\" (UniqueName: \"kubernetes.io/projected/88a3dbaa-6957-40e2-ad04-32b9a2516a40-kube-api-access-lw66s\") pod \"ssh-known-hosts-edpm-deployment-wqd2v\" (UID: \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\") " pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.529162 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/88a3dbaa-6957-40e2-ad04-32b9a2516a40-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-wqd2v\" (UID: \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\") " pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.529343 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/88a3dbaa-6957-40e2-ad04-32b9a2516a40-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-wqd2v\" (UID: \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\") " pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.529418 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lw66s\" (UniqueName: \"kubernetes.io/projected/88a3dbaa-6957-40e2-ad04-32b9a2516a40-kube-api-access-lw66s\") pod \"ssh-known-hosts-edpm-deployment-wqd2v\" (UID: \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\") " pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.534351 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/88a3dbaa-6957-40e2-ad04-32b9a2516a40-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-wqd2v\" (UID: \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\") " pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.534394 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/88a3dbaa-6957-40e2-ad04-32b9a2516a40-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-wqd2v\" (UID: \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\") " pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.549879 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lw66s\" (UniqueName: \"kubernetes.io/projected/88a3dbaa-6957-40e2-ad04-32b9a2516a40-kube-api-access-lw66s\") pod \"ssh-known-hosts-edpm-deployment-wqd2v\" (UID: \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\") " pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:22 crc kubenswrapper[4767]: I0128 19:04:22.613159 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:23 crc kubenswrapper[4767]: I0128 19:04:23.164966 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-wqd2v"]
Jan 28 19:04:23 crc kubenswrapper[4767]: I0128 19:04:23.207686 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v" event={"ID":"88a3dbaa-6957-40e2-ad04-32b9a2516a40","Type":"ContainerStarted","Data":"a6d154e5c7ccceedc15c59669a7c724ba512e7d278b09e552347b1f778479b65"}
Jan 28 19:04:24 crc kubenswrapper[4767]: I0128 19:04:24.221179 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v" event={"ID":"88a3dbaa-6957-40e2-ad04-32b9a2516a40","Type":"ContainerStarted","Data":"fe8ca008c3d640d3d98f730ca4a9a9f6bee7e64cb5b3fc5e071e23d4dd0e7941"}
Jan 28 19:04:24 crc kubenswrapper[4767]: I0128 19:04:24.242004 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v" podStartSLOduration=1.656410937 podStartE2EDuration="2.241967709s" podCreationTimestamp="2026-01-28 19:04:22 +0000 UTC" firstStartedPulling="2026-01-28 19:04:23.171132827 +0000 UTC m=+2069.135315701" lastFinishedPulling="2026-01-28 19:04:23.756689579 +0000 UTC m=+2069.720872473" observedRunningTime="2026-01-28 19:04:24.238388577 +0000 UTC m=+2070.202571471" watchObservedRunningTime="2026-01-28 19:04:24.241967709 +0000 UTC m=+2070.206150603"
Jan 28 19:04:31 crc kubenswrapper[4767]: I0128 19:04:31.299706 4767 generic.go:334] "Generic (PLEG): container finished" podID="88a3dbaa-6957-40e2-ad04-32b9a2516a40" containerID="fe8ca008c3d640d3d98f730ca4a9a9f6bee7e64cb5b3fc5e071e23d4dd0e7941" exitCode=0
Jan 28 19:04:31 crc kubenswrapper[4767]: I0128 19:04:31.299817 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v" event={"ID":"88a3dbaa-6957-40e2-ad04-32b9a2516a40","Type":"ContainerDied","Data":"fe8ca008c3d640d3d98f730ca4a9a9f6bee7e64cb5b3fc5e071e23d4dd0e7941"}
Jan 28 19:04:32 crc kubenswrapper[4767]: I0128 19:04:32.767786 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:32 crc kubenswrapper[4767]: I0128 19:04:32.872598 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lw66s\" (UniqueName: \"kubernetes.io/projected/88a3dbaa-6957-40e2-ad04-32b9a2516a40-kube-api-access-lw66s\") pod \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\" (UID: \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\") "
Jan 28 19:04:32 crc kubenswrapper[4767]: I0128 19:04:32.872670 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/88a3dbaa-6957-40e2-ad04-32b9a2516a40-ssh-key-openstack-edpm-ipam\") pod \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\" (UID: \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\") "
Jan 28 19:04:32 crc kubenswrapper[4767]: I0128 19:04:32.872743 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/88a3dbaa-6957-40e2-ad04-32b9a2516a40-inventory-0\") pod \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\" (UID: \"88a3dbaa-6957-40e2-ad04-32b9a2516a40\") "
Jan 28 19:04:32 crc kubenswrapper[4767]: I0128 19:04:32.879056 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88a3dbaa-6957-40e2-ad04-32b9a2516a40-kube-api-access-lw66s" (OuterVolumeSpecName: "kube-api-access-lw66s") pod "88a3dbaa-6957-40e2-ad04-32b9a2516a40" (UID: "88a3dbaa-6957-40e2-ad04-32b9a2516a40"). InnerVolumeSpecName "kube-api-access-lw66s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:04:32 crc kubenswrapper[4767]: I0128 19:04:32.904995 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88a3dbaa-6957-40e2-ad04-32b9a2516a40-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "88a3dbaa-6957-40e2-ad04-32b9a2516a40" (UID: "88a3dbaa-6957-40e2-ad04-32b9a2516a40"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:04:32 crc kubenswrapper[4767]: I0128 19:04:32.905431 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/88a3dbaa-6957-40e2-ad04-32b9a2516a40-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "88a3dbaa-6957-40e2-ad04-32b9a2516a40" (UID: "88a3dbaa-6957-40e2-ad04-32b9a2516a40"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:04:32 crc kubenswrapper[4767]: I0128 19:04:32.975521 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lw66s\" (UniqueName: \"kubernetes.io/projected/88a3dbaa-6957-40e2-ad04-32b9a2516a40-kube-api-access-lw66s\") on node \"crc\" DevicePath \"\""
Jan 28 19:04:32 crc kubenswrapper[4767]: I0128 19:04:32.975572 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/88a3dbaa-6957-40e2-ad04-32b9a2516a40-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 28 19:04:32 crc kubenswrapper[4767]: I0128 19:04:32.975583 4767 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/88a3dbaa-6957-40e2-ad04-32b9a2516a40-inventory-0\") on node \"crc\" DevicePath \"\""
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.320573 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v" event={"ID":"88a3dbaa-6957-40e2-ad04-32b9a2516a40","Type":"ContainerDied","Data":"a6d154e5c7ccceedc15c59669a7c724ba512e7d278b09e552347b1f778479b65"}
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.321174 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6d154e5c7ccceedc15c59669a7c724ba512e7d278b09e552347b1f778479b65"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.320641 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-wqd2v"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.398410 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"]
Jan 28 19:04:33 crc kubenswrapper[4767]: E0128 19:04:33.399008 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88a3dbaa-6957-40e2-ad04-32b9a2516a40" containerName="ssh-known-hosts-edpm-deployment"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.399037 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="88a3dbaa-6957-40e2-ad04-32b9a2516a40" containerName="ssh-known-hosts-edpm-deployment"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.399259 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="88a3dbaa-6957-40e2-ad04-32b9a2516a40" containerName="ssh-known-hosts-edpm-deployment"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.400102 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.405574 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.405719 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.405920 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.406224 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.413377 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"]
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.498607 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b7391a9-8171-40e0-927d-73542f246a2e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-48scb\" (UID: \"9b7391a9-8171-40e0-927d-73542f246a2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.498937 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fdj7\" (UniqueName: \"kubernetes.io/projected/9b7391a9-8171-40e0-927d-73542f246a2e-kube-api-access-5fdj7\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-48scb\" (UID: \"9b7391a9-8171-40e0-927d-73542f246a2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.499259 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9b7391a9-8171-40e0-927d-73542f246a2e-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-48scb\" (UID: \"9b7391a9-8171-40e0-927d-73542f246a2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.601946 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fdj7\" (UniqueName: \"kubernetes.io/projected/9b7391a9-8171-40e0-927d-73542f246a2e-kube-api-access-5fdj7\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-48scb\" (UID: \"9b7391a9-8171-40e0-927d-73542f246a2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.602047 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9b7391a9-8171-40e0-927d-73542f246a2e-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-48scb\" (UID: \"9b7391a9-8171-40e0-927d-73542f246a2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.602166 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b7391a9-8171-40e0-927d-73542f246a2e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-48scb\" (UID: \"9b7391a9-8171-40e0-927d-73542f246a2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.606729 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b7391a9-8171-40e0-927d-73542f246a2e-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-48scb\" (UID: \"9b7391a9-8171-40e0-927d-73542f246a2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.608050 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9b7391a9-8171-40e0-927d-73542f246a2e-ssh-key-openstack-edpm-ipam\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-48scb\" (UID: \"9b7391a9-8171-40e0-927d-73542f246a2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.619692 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fdj7\" (UniqueName: \"kubernetes.io/projected/9b7391a9-8171-40e0-927d-73542f246a2e-kube-api-access-5fdj7\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-48scb\" (UID: \"9b7391a9-8171-40e0-927d-73542f246a2e\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:33 crc kubenswrapper[4767]: I0128 19:04:33.722267 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:34 crc kubenswrapper[4767]: I0128 19:04:34.294650 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"]
Jan 28 19:04:34 crc kubenswrapper[4767]: I0128 19:04:34.304843 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 28 19:04:34 crc kubenswrapper[4767]: I0128 19:04:34.331071 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb" event={"ID":"9b7391a9-8171-40e0-927d-73542f246a2e","Type":"ContainerStarted","Data":"89fd0ab289409bec77e2595f3e22649a09d8c94b401c53fdeaf0531507e5aa51"}
Jan 28 19:04:36 crc kubenswrapper[4767]: I0128 19:04:36.355565 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb" event={"ID":"9b7391a9-8171-40e0-927d-73542f246a2e","Type":"ContainerStarted","Data":"ba07e4895e9bddba84102cb9b9bf65ba60dcd43bb048c819b51a9d02c0c5725a"}
Jan 28 19:04:36 crc kubenswrapper[4767]: I0128 19:04:36.380922 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb" podStartSLOduration=2.57401304 podStartE2EDuration="3.380902671s" podCreationTimestamp="2026-01-28 19:04:33 +0000 UTC" firstStartedPulling="2026-01-28 19:04:34.304632638 +0000 UTC m=+2080.268815512" lastFinishedPulling="2026-01-28 19:04:35.111522269 +0000 UTC m=+2081.075705143" observedRunningTime="2026-01-28 19:04:36.375899823 +0000 UTC m=+2082.340082707" watchObservedRunningTime="2026-01-28 19:04:36.380902671 +0000 UTC m=+2082.345085535"
Jan 28 19:04:43 crc kubenswrapper[4767]: I0128 19:04:43.416361 4767 generic.go:334] "Generic (PLEG): container finished" podID="9b7391a9-8171-40e0-927d-73542f246a2e" containerID="ba07e4895e9bddba84102cb9b9bf65ba60dcd43bb048c819b51a9d02c0c5725a" exitCode=0
Jan 28 19:04:43 crc kubenswrapper[4767]: I0128 19:04:43.416479 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb" event={"ID":"9b7391a9-8171-40e0-927d-73542f246a2e","Type":"ContainerDied","Data":"ba07e4895e9bddba84102cb9b9bf65ba60dcd43bb048c819b51a9d02c0c5725a"}
Jan 28 19:04:44 crc kubenswrapper[4767]: I0128 19:04:44.871189 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:44 crc kubenswrapper[4767]: I0128 19:04:44.957029 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9b7391a9-8171-40e0-927d-73542f246a2e-ssh-key-openstack-edpm-ipam\") pod \"9b7391a9-8171-40e0-927d-73542f246a2e\" (UID: \"9b7391a9-8171-40e0-927d-73542f246a2e\") "
Jan 28 19:04:44 crc kubenswrapper[4767]: I0128 19:04:44.957110 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fdj7\" (UniqueName: \"kubernetes.io/projected/9b7391a9-8171-40e0-927d-73542f246a2e-kube-api-access-5fdj7\") pod \"9b7391a9-8171-40e0-927d-73542f246a2e\" (UID: \"9b7391a9-8171-40e0-927d-73542f246a2e\") "
Jan 28 19:04:44 crc kubenswrapper[4767]: I0128 19:04:44.957260 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b7391a9-8171-40e0-927d-73542f246a2e-inventory\") pod \"9b7391a9-8171-40e0-927d-73542f246a2e\" (UID: \"9b7391a9-8171-40e0-927d-73542f246a2e\") "
Jan 28 19:04:44 crc kubenswrapper[4767]: I0128 19:04:44.963646 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b7391a9-8171-40e0-927d-73542f246a2e-kube-api-access-5fdj7" (OuterVolumeSpecName: "kube-api-access-5fdj7") pod "9b7391a9-8171-40e0-927d-73542f246a2e" (UID: "9b7391a9-8171-40e0-927d-73542f246a2e"). InnerVolumeSpecName "kube-api-access-5fdj7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:04:44 crc kubenswrapper[4767]: I0128 19:04:44.984823 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b7391a9-8171-40e0-927d-73542f246a2e-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "9b7391a9-8171-40e0-927d-73542f246a2e" (UID: "9b7391a9-8171-40e0-927d-73542f246a2e"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:04:44 crc kubenswrapper[4767]: I0128 19:04:44.985380 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b7391a9-8171-40e0-927d-73542f246a2e-inventory" (OuterVolumeSpecName: "inventory") pod "9b7391a9-8171-40e0-927d-73542f246a2e" (UID: "9b7391a9-8171-40e0-927d-73542f246a2e"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.059584 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/9b7391a9-8171-40e0-927d-73542f246a2e-inventory\") on node \"crc\" DevicePath \"\""
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.059626 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/9b7391a9-8171-40e0-927d-73542f246a2e-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.059639 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fdj7\" (UniqueName: \"kubernetes.io/projected/9b7391a9-8171-40e0-927d-73542f246a2e-kube-api-access-5fdj7\") on node \"crc\" DevicePath \"\""
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.441729 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb" event={"ID":"9b7391a9-8171-40e0-927d-73542f246a2e","Type":"ContainerDied","Data":"89fd0ab289409bec77e2595f3e22649a09d8c94b401c53fdeaf0531507e5aa51"}
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.442126 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="89fd0ab289409bec77e2595f3e22649a09d8c94b401c53fdeaf0531507e5aa51"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.441779 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-48scb"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.456722 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.456810 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.541953 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"]
Jan 28 19:04:45 crc kubenswrapper[4767]: E0128 19:04:45.542590 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b7391a9-8171-40e0-927d-73542f246a2e" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.542616 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b7391a9-8171-40e0-927d-73542f246a2e" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.542906 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b7391a9-8171-40e0-927d-73542f246a2e" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.544012 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.547722 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.548014 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.548178 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.548829 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.556240 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"]
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.677131 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x\" (UID: \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.677717 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x\" (UID: \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.677745 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcc54\" (UniqueName: \"kubernetes.io/projected/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-kube-api-access-hcc54\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x\" (UID: \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.780247 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x\" (UID: \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.780663 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcc54\" (UniqueName: \"kubernetes.io/projected/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-kube-api-access-hcc54\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x\" (UID: \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.780901 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x\" (UID: \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.785737 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x\" (UID: \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.790177 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-ssh-key-openstack-edpm-ipam\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x\" (UID: \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.805932 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcc54\" (UniqueName: \"kubernetes.io/projected/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-kube-api-access-hcc54\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x\" (UID: \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"
Jan 28 19:04:45 crc kubenswrapper[4767]: I0128 19:04:45.863444 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"
Jan 28 19:04:46 crc kubenswrapper[4767]: I0128 19:04:46.425049 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"]
Jan 28 19:04:46 crc kubenswrapper[4767]: I0128 19:04:46.454652 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x" event={"ID":"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db","Type":"ContainerStarted","Data":"5c09c048faa688a60c1969fa4612a9672684aa98580e042338c3d4e176529850"}
Jan 28 19:04:47 crc kubenswrapper[4767]: I0128 19:04:47.464426 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x" event={"ID":"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db","Type":"ContainerStarted","Data":"1baf5d58ce22fc9f4736d181c7095d2ef6e43a42e149af66a90450e1cce982bf"}
Jan 28 19:04:47 crc kubenswrapper[4767]: I0128 19:04:47.491917 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x" podStartSLOduration=2.043874567 podStartE2EDuration="2.491886042s" podCreationTimestamp="2026-01-28 19:04:45 +0000 UTC" firstStartedPulling="2026-01-28 19:04:46.43534036 +0000 UTC m=+2092.399523234" lastFinishedPulling="2026-01-28 19:04:46.883351835 +0000 UTC m=+2092.847534709" observedRunningTime="2026-01-28 19:04:47.486319977 +0000 UTC m=+2093.450502871" watchObservedRunningTime="2026-01-28 19:04:47.491886042 +0000 UTC m=+2093.456068916"
Jan 28 19:04:51 crc kubenswrapper[4767]: I0128 19:04:51.731596 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-mxn8h"]
Jan 28 19:04:51 crc kubenswrapper[4767]: I0128 19:04:51.736442 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:04:51 crc kubenswrapper[4767]: I0128 19:04:51.766751 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mxn8h"]
Jan 28 19:04:51 crc kubenswrapper[4767]: I0128 19:04:51.813328 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de556ec-c7d4-45e1-8997-24d52a5c58cb-utilities\") pod \"redhat-operators-mxn8h\" (UID: \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\") " pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:04:51 crc kubenswrapper[4767]: I0128 19:04:51.813393 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de556ec-c7d4-45e1-8997-24d52a5c58cb-catalog-content\") pod \"redhat-operators-mxn8h\" (UID: \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\") " pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:04:51 crc kubenswrapper[4767]: I0128 19:04:51.813467 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-krpbr\" (UniqueName: \"kubernetes.io/projected/0de556ec-c7d4-45e1-8997-24d52a5c58cb-kube-api-access-krpbr\") pod \"redhat-operators-mxn8h\" (UID: \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\") " pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:04:51 crc kubenswrapper[4767]: I0128 19:04:51.915291 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-krpbr\" (UniqueName: \"kubernetes.io/projected/0de556ec-c7d4-45e1-8997-24d52a5c58cb-kube-api-access-krpbr\") pod \"redhat-operators-mxn8h\" (UID: \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\") " pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:04:51 crc kubenswrapper[4767]: I0128 19:04:51.915601 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de556ec-c7d4-45e1-8997-24d52a5c58cb-utilities\") pod \"redhat-operators-mxn8h\" (UID: \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\") " pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:04:51 crc kubenswrapper[4767]: I0128 19:04:51.915628 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de556ec-c7d4-45e1-8997-24d52a5c58cb-catalog-content\") pod \"redhat-operators-mxn8h\" (UID: \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\") " pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:04:51 crc kubenswrapper[4767]: I0128 19:04:51.916282 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de556ec-c7d4-45e1-8997-24d52a5c58cb-utilities\") pod \"redhat-operators-mxn8h\" (UID: \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\") " pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:04:51 crc kubenswrapper[4767]: I0128 19:04:51.918164 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de556ec-c7d4-45e1-8997-24d52a5c58cb-catalog-content\") pod \"redhat-operators-mxn8h\" (UID: \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\") " pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:04:51 crc kubenswrapper[4767]: I0128 19:04:51.943552 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-krpbr\" (UniqueName: \"kubernetes.io/projected/0de556ec-c7d4-45e1-8997-24d52a5c58cb-kube-api-access-krpbr\") pod \"redhat-operators-mxn8h\" (UID: \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\") " pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:04:52 crc kubenswrapper[4767]: I0128 19:04:52.070162 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:04:52 crc kubenswrapper[4767]: I0128 19:04:52.607241 4767 scope.go:117] "RemoveContainer" containerID="5419b3c6e5bacc7533521c2468285cb4edc97b700ec859eaefa3800ffb08504d"
Jan 28 19:04:52 crc kubenswrapper[4767]: I0128 19:04:52.608417 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-mxn8h"]
Jan 28 19:04:53 crc kubenswrapper[4767]: I0128 19:04:53.522327 4767 generic.go:334] "Generic (PLEG): container finished" podID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerID="9b13e1fd70ba89abca25a0202bcd6320e607a4345863fdcfcf07d8294f035df3" exitCode=0
Jan 28 19:04:53 crc kubenswrapper[4767]: I0128 19:04:53.522439 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxn8h" event={"ID":"0de556ec-c7d4-45e1-8997-24d52a5c58cb","Type":"ContainerDied","Data":"9b13e1fd70ba89abca25a0202bcd6320e607a4345863fdcfcf07d8294f035df3"}
Jan 28 19:04:53 crc kubenswrapper[4767]: I0128 19:04:53.522848 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxn8h" event={"ID":"0de556ec-c7d4-45e1-8997-24d52a5c58cb","Type":"ContainerStarted","Data":"9328a86a4f30ffec198e2b37dadb39b1d5764a420f7e1d6d32211eda1daeb58b"}
Jan 28 19:04:55 crc kubenswrapper[4767]: I0128 19:04:55.550322 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxn8h" event={"ID":"0de556ec-c7d4-45e1-8997-24d52a5c58cb","Type":"ContainerStarted","Data":"9b3525a0020822f252de4aee1cebfe9d4e4b025f846462eb3dab25e6590dab87"}
Jan 28 19:04:57 crc kubenswrapper[4767]: I0128 19:04:57.570010 4767 generic.go:334] "Generic (PLEG): container finished" podID="b4fbc177-5b38-40bc-9ce3-1f4509ccf3db" containerID="1baf5d58ce22fc9f4736d181c7095d2ef6e43a42e149af66a90450e1cce982bf" exitCode=0
Jan 28 19:04:57 crc kubenswrapper[4767]: I0128 19:04:57.570131 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x" event={"ID":"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db","Type":"ContainerDied","Data":"1baf5d58ce22fc9f4736d181c7095d2ef6e43a42e149af66a90450e1cce982bf"}
Jan 28 19:04:57 crc kubenswrapper[4767]: I0128 19:04:57.573563 4767 generic.go:334] "Generic (PLEG): container finished" podID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerID="9b3525a0020822f252de4aee1cebfe9d4e4b025f846462eb3dab25e6590dab87" exitCode=0
Jan 28 19:04:57 crc kubenswrapper[4767]: I0128 19:04:57.573602 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxn8h" event={"ID":"0de556ec-c7d4-45e1-8997-24d52a5c58cb","Type":"ContainerDied","Data":"9b3525a0020822f252de4aee1cebfe9d4e4b025f846462eb3dab25e6590dab87"}
Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.077430 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x"
Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.192031 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hcc54\" (UniqueName: \"kubernetes.io/projected/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-kube-api-access-hcc54\") pod \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\" (UID: \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\") "
Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.192149 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-ssh-key-openstack-edpm-ipam\") pod \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\" (UID: \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\") "
Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.192396 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-inventory\") pod \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\" (UID: \"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db\") "
Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.210032 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-kube-api-access-hcc54" (OuterVolumeSpecName: "kube-api-access-hcc54") pod "b4fbc177-5b38-40bc-9ce3-1f4509ccf3db" (UID: "b4fbc177-5b38-40bc-9ce3-1f4509ccf3db"). InnerVolumeSpecName "kube-api-access-hcc54". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.233946 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-inventory" (OuterVolumeSpecName: "inventory") pod "b4fbc177-5b38-40bc-9ce3-1f4509ccf3db" (UID: "b4fbc177-5b38-40bc-9ce3-1f4509ccf3db"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.236187 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "b4fbc177-5b38-40bc-9ce3-1f4509ccf3db" (UID: "b4fbc177-5b38-40bc-9ce3-1f4509ccf3db"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.295278 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.295337 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.295351 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hcc54\" (UniqueName: \"kubernetes.io/projected/b4fbc177-5b38-40bc-9ce3-1f4509ccf3db-kube-api-access-hcc54\") on node \"crc\" DevicePath \"\"" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.606444 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxn8h" event={"ID":"0de556ec-c7d4-45e1-8997-24d52a5c58cb","Type":"ContainerStarted","Data":"76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754"} Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.634952 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x" event={"ID":"b4fbc177-5b38-40bc-9ce3-1f4509ccf3db","Type":"ContainerDied","Data":"5c09c048faa688a60c1969fa4612a9672684aa98580e042338c3d4e176529850"} Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.635008 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5c09c048faa688a60c1969fa4612a9672684aa98580e042338c3d4e176529850" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.635091 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.642876 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-mxn8h" podStartSLOduration=3.816515291 podStartE2EDuration="8.642848252s" podCreationTimestamp="2026-01-28 19:04:51 +0000 UTC" firstStartedPulling="2026-01-28 19:04:53.525701183 +0000 UTC m=+2099.489884067" lastFinishedPulling="2026-01-28 19:04:58.352034164 +0000 UTC m=+2104.316217028" observedRunningTime="2026-01-28 19:04:59.625915458 +0000 UTC m=+2105.590098332" watchObservedRunningTime="2026-01-28 19:04:59.642848252 +0000 UTC m=+2105.607031126" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.701606 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx"] Jan 28 19:04:59 crc kubenswrapper[4767]: E0128 19:04:59.706024 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b4fbc177-5b38-40bc-9ce3-1f4509ccf3db" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.706067 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b4fbc177-5b38-40bc-9ce3-1f4509ccf3db" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.706401 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b4fbc177-5b38-40bc-9ce3-1f4509ccf3db" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.707636 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.711132 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.713741 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.715157 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.715386 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.715514 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.716125 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.716299 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.716410 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.717654 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx"] Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.805028 4767 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.805114 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.805182 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.805355 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.805472 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.805541 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.805662 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.805757 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.805814 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7mjs\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-kube-api-access-w7mjs\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.805847 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.805975 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.806024 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.806070 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.806150 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.907879 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.907923 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.907992 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.908033 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.908064 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.908089 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.908121 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.908162 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.908220 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7mjs\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-kube-api-access-w7mjs\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.908242 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.908289 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.908310 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.908347 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.908372 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.912970 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-ssh-key-openstack-edpm-ipam\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.913448 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.913634 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.918487 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.924758 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.924787 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.925939 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.925780 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.926452 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.927115 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.928585 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.932902 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.933626 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:04:59 crc kubenswrapper[4767]: I0128 19:04:59.937340 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7mjs\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-kube-api-access-w7mjs\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-846wx\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:05:00 crc kubenswrapper[4767]: I0128 19:05:00.036523 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx"
Jan 28 19:05:00 crc kubenswrapper[4767]: I0128 19:05:00.756191 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx"]
Jan 28 19:05:01 crc kubenswrapper[4767]: I0128 19:05:01.655849 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" event={"ID":"a58fe523-7845-4753-8549-c70919ee390b","Type":"ContainerStarted","Data":"8674fd557ba807f32002153b9facf99ddc54bcb687e0a296f55e4e7328e90dc9"}
Jan 28 19:05:01 crc kubenswrapper[4767]: I0128 19:05:01.656277 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" event={"ID":"a58fe523-7845-4753-8549-c70919ee390b","Type":"ContainerStarted","Data":"a3a4704afeec9515449d15b6e72a8f5286b31d63d9ab1f9c0c4973d0f017c7a0"}
Jan 28 19:05:01 crc kubenswrapper[4767]: I0128 19:05:01.690369 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" podStartSLOduration=2.246043829 podStartE2EDuration="2.690346919s" podCreationTimestamp="2026-01-28 19:04:59 +0000 UTC" firstStartedPulling="2026-01-28 19:05:00.734564483 +0000 UTC m=+2106.698747357" lastFinishedPulling="2026-01-28 19:05:01.178867573 +0000 UTC m=+2107.143050447" observedRunningTime="2026-01-28 19:05:01.679854838 +0000 UTC m=+2107.644037712" watchObservedRunningTime="2026-01-28 19:05:01.690346919 +0000 UTC m=+2107.654529793"
Jan 28 19:05:02 crc kubenswrapper[4767]: I0128 19:05:02.070964 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:05:02 crc kubenswrapper[4767]: I0128 19:05:02.071066 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:05:03 crc kubenswrapper[4767]: I0128 19:05:03.131853 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mxn8h" podUID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerName="registry-server" probeResult="failure" output=<
Jan 28 19:05:03 crc kubenswrapper[4767]: timeout: failed to connect service ":50051" within 1s
Jan 28 19:05:03 crc kubenswrapper[4767]: >
Jan 28 19:05:13 crc kubenswrapper[4767]: I0128 19:05:13.116631 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-mxn8h" podUID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerName="registry-server" probeResult="failure" output=<
Jan 28 19:05:13 crc kubenswrapper[4767]: timeout: failed to connect service ":50051" within 1s
Jan 28 19:05:13 crc kubenswrapper[4767]: >
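The two Startup probe failures above land about ten seconds apart, each timing out after the probe's one-second window against the registry-server gRPC endpoint on :50051; the pod is only reported "started" at 19:05:22 once the probe finally succeeds. A rough sketch for tallying such probe failures out of a kubelet log; the regex is an assumption inferred from the line format seen in this file (and assumes one entry per line), not an official parser:

    import re
    from collections import Counter

    # Matches lines like:
    # Jan 28 19:05:03 crc kubenswrapper[4767]: ... "Probe failed" probeType="Startup" pod="..."
    probe_re = re.compile(
        r'^(?P<ts>\w+ \d+ \d+:\d+:\d+) \S+ kubenswrapper\[\d+\]: .*"Probe failed".*?'
        r'probeType="(?P<type>\w+)".*?pod="(?P<pod>[^"]+)"'
    )

    def probe_failures(lines):
        for line in lines:
            m = probe_re.search(line)
            if m:
                yield m.group("ts"), m.group("type"), m.group("pod")

    with open("kubelet.log") as f:
        counts = Counter((ptype, pod) for _, ptype, pod in probe_failures(f))
    for (ptype, pod), n in sorted(counts.items()):
        print(f"{ptype:8} {pod}: {n} failure(s)")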
Jan 28 19:05:15 crc kubenswrapper[4767]: I0128 19:05:15.455928 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 19:05:15 crc kubenswrapper[4767]: I0128 19:05:15.457152 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 19:05:15 crc kubenswrapper[4767]: I0128 19:05:15.457444 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp"
Jan 28 19:05:15 crc kubenswrapper[4767]: I0128 19:05:15.458508 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"967b8a4723891c56d9bbc8957571ded4ea00629b76b3b712cd2b57f9a1382c6f"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 19:05:15 crc kubenswrapper[4767]: I0128 19:05:15.458661 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://967b8a4723891c56d9bbc8957571ded4ea00629b76b3b712cd2b57f9a1382c6f" gracePeriod=600
Jan 28 19:05:15 crc kubenswrapper[4767]: I0128 19:05:15.845780 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="967b8a4723891c56d9bbc8957571ded4ea00629b76b3b712cd2b57f9a1382c6f" exitCode=0
Jan 28 19:05:15 crc kubenswrapper[4767]: I0128 19:05:15.845869 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"967b8a4723891c56d9bbc8957571ded4ea00629b76b3b712cd2b57f9a1382c6f"}
Jan 28 19:05:15 crc kubenswrapper[4767]: I0128 19:05:15.845926 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e"}
Jan 28 19:05:15 crc kubenswrapper[4767]: I0128 19:05:15.845987 4767 scope.go:117] "RemoveContainer" containerID="568d040ab31746bf679f53fc1ae17011986e0f61e345da366a6ba0e4f9057152"
Jan 28 19:05:22 crc kubenswrapper[4767]: I0128 19:05:22.120024 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:05:22 crc kubenswrapper[4767]: I0128 19:05:22.173872 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:05:22 crc kubenswrapper[4767]: I0128 19:05:22.944199 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mxn8h"]
Jan 28 19:05:23 crc kubenswrapper[4767]: I0128 19:05:23.938581 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-mxn8h" podUID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerName="registry-server" containerID="cri-o://76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754" gracePeriod=2
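In the sequence above, a failed liveness probe causes kubelet to kill machine-config-daemon with what is presumably the pod's termination grace period (gracePeriod=600), PLEG then reports ContainerDied followed by ContainerStarted for the replacement, and what appears to be an older dead instance (568d04...) is cleaned up via RemoveContainer; the API-initiated DELETE of the marketplace pod, by contrast, proceeds with gracePeriod=2. A sketch pairing those PLEG events per pod to surface restart chains; the regex is an assumption from the event format in this log:

    import re
    from collections import defaultdict

    # Matches PLEG events like:
    # ... pod="ns/name" event={"ID":"<uid>","Type":"ContainerDied","Data":"<64-hex id>"}
    pleg_re = re.compile(
        r'pod="(?P<pod>[^"]+)" event=\{"ID":"(?P<uid>[^"]+)",'
        r'"Type":"(?P<type>Container\w+)","Data":"(?P<cid>[0-9a-f]{64})"\}'
    )

    def restarts(lines):
        events = defaultdict(list)
        for line in lines:
            m = pleg_re.search(line)
            if m:
                # Data may also be a sandbox ID rather than a container ID;
                # good enough for a rough tally.
                events[m.group("pod")].append((m.group("type"), m.group("cid")[:12]))
        for pod, evs in events.items():
            died = [c for t, c in evs if t == "ContainerDied"]
            started = [c for t, c in evs if t == "ContainerStarted"]
            if died and started:
                print(f"{pod}: died={died} started={started}")

    with open("kubelet.log") as f:
        restarts(f)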
Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.507899 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mxn8h"
Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.636365 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-krpbr\" (UniqueName: \"kubernetes.io/projected/0de556ec-c7d4-45e1-8997-24d52a5c58cb-kube-api-access-krpbr\") pod \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\" (UID: \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\") "
Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.636599 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de556ec-c7d4-45e1-8997-24d52a5c58cb-catalog-content\") pod \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\" (UID: \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\") "
Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.636677 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de556ec-c7d4-45e1-8997-24d52a5c58cb-utilities\") pod \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\" (UID: \"0de556ec-c7d4-45e1-8997-24d52a5c58cb\") "
Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.637800 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0de556ec-c7d4-45e1-8997-24d52a5c58cb-utilities" (OuterVolumeSpecName: "utilities") pod "0de556ec-c7d4-45e1-8997-24d52a5c58cb" (UID: "0de556ec-c7d4-45e1-8997-24d52a5c58cb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.648059 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0de556ec-c7d4-45e1-8997-24d52a5c58cb-kube-api-access-krpbr" (OuterVolumeSpecName: "kube-api-access-krpbr") pod "0de556ec-c7d4-45e1-8997-24d52a5c58cb" (UID: "0de556ec-c7d4-45e1-8997-24d52a5c58cb"). InnerVolumeSpecName "kube-api-access-krpbr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.740283 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-krpbr\" (UniqueName: \"kubernetes.io/projected/0de556ec-c7d4-45e1-8997-24d52a5c58cb-kube-api-access-krpbr\") on node \"crc\" DevicePath \"\""
Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.740328 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0de556ec-c7d4-45e1-8997-24d52a5c58cb-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.764693 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0de556ec-c7d4-45e1-8997-24d52a5c58cb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0de556ec-c7d4-45e1-8997-24d52a5c58cb" (UID: "0de556ec-c7d4-45e1-8997-24d52a5c58cb"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.842481 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0de556ec-c7d4-45e1-8997-24d52a5c58cb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.951196 4767 generic.go:334] "Generic (PLEG): container finished" podID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerID="76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754" exitCode=0 Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.951267 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxn8h" event={"ID":"0de556ec-c7d4-45e1-8997-24d52a5c58cb","Type":"ContainerDied","Data":"76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754"} Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.951308 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-mxn8h" event={"ID":"0de556ec-c7d4-45e1-8997-24d52a5c58cb","Type":"ContainerDied","Data":"9328a86a4f30ffec198e2b37dadb39b1d5764a420f7e1d6d32211eda1daeb58b"} Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.951333 4767 scope.go:117] "RemoveContainer" containerID="76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754" Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.951341 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-mxn8h" Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.982285 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-mxn8h"] Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.984187 4767 scope.go:117] "RemoveContainer" containerID="9b3525a0020822f252de4aee1cebfe9d4e4b025f846462eb3dab25e6590dab87" Jan 28 19:05:24 crc kubenswrapper[4767]: I0128 19:05:24.994878 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-mxn8h"] Jan 28 19:05:25 crc kubenswrapper[4767]: I0128 19:05:25.026647 4767 scope.go:117] "RemoveContainer" containerID="9b13e1fd70ba89abca25a0202bcd6320e607a4345863fdcfcf07d8294f035df3" Jan 28 19:05:25 crc kubenswrapper[4767]: I0128 19:05:25.069018 4767 scope.go:117] "RemoveContainer" containerID="76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754" Jan 28 19:05:25 crc kubenswrapper[4767]: E0128 19:05:25.069739 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754\": container with ID starting with 76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754 not found: ID does not exist" containerID="76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754" Jan 28 19:05:25 crc kubenswrapper[4767]: I0128 19:05:25.069795 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754"} err="failed to get container status \"76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754\": rpc error: code = NotFound desc = could not find container \"76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754\": container with ID starting with 76b7b681a2c8dec6788cc2945c1506245e4a593443e20d1edb931eefe3b6e754 not found: ID does not exist" Jan 28 19:05:25 crc 
kubenswrapper[4767]: I0128 19:05:25.069835 4767 scope.go:117] "RemoveContainer" containerID="9b3525a0020822f252de4aee1cebfe9d4e4b025f846462eb3dab25e6590dab87" Jan 28 19:05:25 crc kubenswrapper[4767]: E0128 19:05:25.070562 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b3525a0020822f252de4aee1cebfe9d4e4b025f846462eb3dab25e6590dab87\": container with ID starting with 9b3525a0020822f252de4aee1cebfe9d4e4b025f846462eb3dab25e6590dab87 not found: ID does not exist" containerID="9b3525a0020822f252de4aee1cebfe9d4e4b025f846462eb3dab25e6590dab87" Jan 28 19:05:25 crc kubenswrapper[4767]: I0128 19:05:25.070621 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b3525a0020822f252de4aee1cebfe9d4e4b025f846462eb3dab25e6590dab87"} err="failed to get container status \"9b3525a0020822f252de4aee1cebfe9d4e4b025f846462eb3dab25e6590dab87\": rpc error: code = NotFound desc = could not find container \"9b3525a0020822f252de4aee1cebfe9d4e4b025f846462eb3dab25e6590dab87\": container with ID starting with 9b3525a0020822f252de4aee1cebfe9d4e4b025f846462eb3dab25e6590dab87 not found: ID does not exist" Jan 28 19:05:25 crc kubenswrapper[4767]: I0128 19:05:25.070702 4767 scope.go:117] "RemoveContainer" containerID="9b13e1fd70ba89abca25a0202bcd6320e607a4345863fdcfcf07d8294f035df3" Jan 28 19:05:25 crc kubenswrapper[4767]: E0128 19:05:25.071253 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b13e1fd70ba89abca25a0202bcd6320e607a4345863fdcfcf07d8294f035df3\": container with ID starting with 9b13e1fd70ba89abca25a0202bcd6320e607a4345863fdcfcf07d8294f035df3 not found: ID does not exist" containerID="9b13e1fd70ba89abca25a0202bcd6320e607a4345863fdcfcf07d8294f035df3" Jan 28 19:05:25 crc kubenswrapper[4767]: I0128 19:05:25.071293 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b13e1fd70ba89abca25a0202bcd6320e607a4345863fdcfcf07d8294f035df3"} err="failed to get container status \"9b13e1fd70ba89abca25a0202bcd6320e607a4345863fdcfcf07d8294f035df3\": rpc error: code = NotFound desc = could not find container \"9b13e1fd70ba89abca25a0202bcd6320e607a4345863fdcfcf07d8294f035df3\": container with ID starting with 9b13e1fd70ba89abca25a0202bcd6320e607a4345863fdcfcf07d8294f035df3 not found: ID does not exist" Jan 28 19:05:26 crc kubenswrapper[4767]: I0128 19:05:26.810258 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" path="/var/lib/kubelet/pods/0de556ec-c7d4-45e1-8997-24d52a5c58cb/volumes" Jan 28 19:05:39 crc kubenswrapper[4767]: I0128 19:05:39.123102 4767 generic.go:334] "Generic (PLEG): container finished" podID="a58fe523-7845-4753-8549-c70919ee390b" containerID="8674fd557ba807f32002153b9facf99ddc54bcb687e0a296f55e4e7328e90dc9" exitCode=0 Jan 28 19:05:39 crc kubenswrapper[4767]: I0128 19:05:39.123183 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" event={"ID":"a58fe523-7845-4753-8549-c70919ee390b","Type":"ContainerDied","Data":"8674fd557ba807f32002153b9facf99ddc54bcb687e0a296f55e4e7328e90dc9"} Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.610696 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.798799 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-ssh-key-openstack-edpm-ipam\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.798871 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-repo-setup-combined-ca-bundle\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.798913 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-inventory\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.798956 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-bootstrap-combined-ca-bundle\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.799002 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-ovn-default-certs-0\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.799036 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-ovn-combined-ca-bundle\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.800514 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-nova-combined-ca-bundle\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.800615 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.800684 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") 
" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.800726 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-neutron-metadata-combined-ca-bundle\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.800768 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.800834 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-libvirt-combined-ca-bundle\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.800867 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-telemetry-combined-ca-bundle\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.800944 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7mjs\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-kube-api-access-w7mjs\") pod \"a58fe523-7845-4753-8549-c70919ee390b\" (UID: \"a58fe523-7845-4753-8549-c70919ee390b\") " Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.808505 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.809733 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.811375 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.812061 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.812674 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.812883 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.813967 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.814742 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.815027 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.816507 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-kube-api-access-w7mjs" (OuterVolumeSpecName: "kube-api-access-w7mjs") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "kube-api-access-w7mjs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.817061 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.818079 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.840133 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.846821 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-inventory" (OuterVolumeSpecName: "inventory") pod "a58fe523-7845-4753-8549-c70919ee390b" (UID: "a58fe523-7845-4753-8549-c70919ee390b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.904973 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.905387 4767 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.905455 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.905518 4767 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.905572 4767 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.905630 4767 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.905693 4767 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.905753 4767 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.905815 4767 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.905884 4767 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.905941 4767 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.906006 4767 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.906065 4767 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a58fe523-7845-4753-8549-c70919ee390b-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:40 crc kubenswrapper[4767]: I0128 19:05:40.906124 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7mjs\" (UniqueName: \"kubernetes.io/projected/a58fe523-7845-4753-8549-c70919ee390b-kube-api-access-w7mjs\") on node \"crc\" DevicePath \"\"" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.146123 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" event={"ID":"a58fe523-7845-4753-8549-c70919ee390b","Type":"ContainerDied","Data":"a3a4704afeec9515449d15b6e72a8f5286b31d63d9ab1f9c0c4973d0f017c7a0"} Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.146165 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a3a4704afeec9515449d15b6e72a8f5286b31d63d9ab1f9c0c4973d0f017c7a0" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.146274 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-846wx" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.258590 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr"] Jan 28 19:05:41 crc kubenswrapper[4767]: E0128 19:05:41.259058 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerName="extract-utilities" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.259086 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerName="extract-utilities" Jan 28 19:05:41 crc kubenswrapper[4767]: E0128 19:05:41.259106 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerName="extract-content" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.259116 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerName="extract-content" Jan 28 19:05:41 crc kubenswrapper[4767]: E0128 19:05:41.259149 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a58fe523-7845-4753-8549-c70919ee390b" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.259163 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a58fe523-7845-4753-8549-c70919ee390b" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 28 19:05:41 crc kubenswrapper[4767]: E0128 19:05:41.259194 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerName="registry-server" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.259220 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerName="registry-server" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.259430 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0de556ec-c7d4-45e1-8997-24d52a5c58cb" containerName="registry-server" Jan 28 19:05:41 crc kubenswrapper[4767]: 
I0128 19:05:41.259446 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a58fe523-7845-4753-8549-c70919ee390b" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.260133 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.266362 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.266399 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.270122 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.270711 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.280922 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.295365 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr"] Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.417082 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7xjh\" (UniqueName: \"kubernetes.io/projected/5262098e-80da-417a-92db-44c89a52ae2f-kube-api-access-m7xjh\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.417279 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.417325 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.417448 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.417505 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: 
\"kubernetes.io/configmap/5262098e-80da-417a-92db-44c89a52ae2f-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.519821 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7xjh\" (UniqueName: \"kubernetes.io/projected/5262098e-80da-417a-92db-44c89a52ae2f-kube-api-access-m7xjh\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.519945 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-ssh-key-openstack-edpm-ipam\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.519982 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.520063 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.520101 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5262098e-80da-417a-92db-44c89a52ae2f-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.521794 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5262098e-80da-417a-92db-44c89a52ae2f-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.524690 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.527870 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-ssh-key-openstack-edpm-ipam\") 
pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.537847 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.545030 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7xjh\" (UniqueName: \"kubernetes.io/projected/5262098e-80da-417a-92db-44c89a52ae2f-kube-api-access-m7xjh\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-rcqwr\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:41 crc kubenswrapper[4767]: I0128 19:05:41.599224 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:05:42 crc kubenswrapper[4767]: I0128 19:05:42.208266 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr"] Jan 28 19:05:43 crc kubenswrapper[4767]: I0128 19:05:43.167406 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" event={"ID":"5262098e-80da-417a-92db-44c89a52ae2f","Type":"ContainerStarted","Data":"a3e9ff6f0679065e9f88f9fbfb273a590578d99d93b8054dc6bbf668b6816607"} Jan 28 19:05:44 crc kubenswrapper[4767]: I0128 19:05:44.176971 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" event={"ID":"5262098e-80da-417a-92db-44c89a52ae2f","Type":"ContainerStarted","Data":"f434cfe3bbab44a674f0cd73628bafc802c82bc8be9e91d4ab33afc75d78c1da"} Jan 28 19:05:44 crc kubenswrapper[4767]: I0128 19:05:44.198716 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" podStartSLOduration=2.213315417 podStartE2EDuration="3.198695945s" podCreationTimestamp="2026-01-28 19:05:41 +0000 UTC" firstStartedPulling="2026-01-28 19:05:42.222628161 +0000 UTC m=+2148.186811035" lastFinishedPulling="2026-01-28 19:05:43.208008679 +0000 UTC m=+2149.172191563" observedRunningTime="2026-01-28 19:05:44.194694389 +0000 UTC m=+2150.158877263" watchObservedRunningTime="2026-01-28 19:05:44.198695945 +0000 UTC m=+2150.162878819" Jan 28 19:06:50 crc kubenswrapper[4767]: I0128 19:06:50.846506 4767 generic.go:334] "Generic (PLEG): container finished" podID="5262098e-80da-417a-92db-44c89a52ae2f" containerID="f434cfe3bbab44a674f0cd73628bafc802c82bc8be9e91d4ab33afc75d78c1da" exitCode=0 Jan 28 19:06:50 crc kubenswrapper[4767]: I0128 19:06:50.846813 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" event={"ID":"5262098e-80da-417a-92db-44c89a52ae2f","Type":"ContainerDied","Data":"f434cfe3bbab44a674f0cd73628bafc802c82bc8be9e91d4ab33afc75d78c1da"} Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.361243 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.485143 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-ssh-key-openstack-edpm-ipam\") pod \"5262098e-80da-417a-92db-44c89a52ae2f\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.485296 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-ovn-combined-ca-bundle\") pod \"5262098e-80da-417a-92db-44c89a52ae2f\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.485348 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-inventory\") pod \"5262098e-80da-417a-92db-44c89a52ae2f\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.485444 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7xjh\" (UniqueName: \"kubernetes.io/projected/5262098e-80da-417a-92db-44c89a52ae2f-kube-api-access-m7xjh\") pod \"5262098e-80da-417a-92db-44c89a52ae2f\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.485653 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5262098e-80da-417a-92db-44c89a52ae2f-ovncontroller-config-0\") pod \"5262098e-80da-417a-92db-44c89a52ae2f\" (UID: \"5262098e-80da-417a-92db-44c89a52ae2f\") " Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.491543 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "5262098e-80da-417a-92db-44c89a52ae2f" (UID: "5262098e-80da-417a-92db-44c89a52ae2f"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.492910 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5262098e-80da-417a-92db-44c89a52ae2f-kube-api-access-m7xjh" (OuterVolumeSpecName: "kube-api-access-m7xjh") pod "5262098e-80da-417a-92db-44c89a52ae2f" (UID: "5262098e-80da-417a-92db-44c89a52ae2f"). InnerVolumeSpecName "kube-api-access-m7xjh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.512604 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5262098e-80da-417a-92db-44c89a52ae2f-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "5262098e-80da-417a-92db-44c89a52ae2f" (UID: "5262098e-80da-417a-92db-44c89a52ae2f"). InnerVolumeSpecName "ovncontroller-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.516373 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-inventory" (OuterVolumeSpecName: "inventory") pod "5262098e-80da-417a-92db-44c89a52ae2f" (UID: "5262098e-80da-417a-92db-44c89a52ae2f"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.517226 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "5262098e-80da-417a-92db-44c89a52ae2f" (UID: "5262098e-80da-417a-92db-44c89a52ae2f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.588505 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.588539 4767 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.588549 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/5262098e-80da-417a-92db-44c89a52ae2f-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.588558 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m7xjh\" (UniqueName: \"kubernetes.io/projected/5262098e-80da-417a-92db-44c89a52ae2f-kube-api-access-m7xjh\") on node \"crc\" DevicePath \"\"" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.588567 4767 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/5262098e-80da-417a-92db-44c89a52ae2f-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.868011 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" event={"ID":"5262098e-80da-417a-92db-44c89a52ae2f","Type":"ContainerDied","Data":"a3e9ff6f0679065e9f88f9fbfb273a590578d99d93b8054dc6bbf668b6816607"} Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.868057 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a3e9ff6f0679065e9f88f9fbfb273a590578d99d93b8054dc6bbf668b6816607" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.868161 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-rcqwr" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.972334 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv"] Jan 28 19:06:52 crc kubenswrapper[4767]: E0128 19:06:52.972789 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5262098e-80da-417a-92db-44c89a52ae2f" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.972809 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="5262098e-80da-417a-92db-44c89a52ae2f" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.972992 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="5262098e-80da-417a-92db-44c89a52ae2f" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.973755 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.984960 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.984959 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.985027 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.985046 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.985134 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.985958 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv"] Jan 28 19:06:52 crc kubenswrapper[4767]: I0128 19:06:52.988571 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.100903 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-clrfr\" (UniqueName: \"kubernetes.io/projected/485ff095-77ad-4166-af4c-1b900e3d2c4a-kube-api-access-clrfr\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.101021 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.101092 4767 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.101138 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.101336 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.101395 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.203473 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.204453 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.204534 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-clrfr\" (UniqueName: \"kubernetes.io/projected/485ff095-77ad-4166-af4c-1b900e3d2c4a-kube-api-access-clrfr\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.204581 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-neutron-ovn-metadata-agent-neutron-config-0\") pod 
\"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.204663 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.204698 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.210485 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.210495 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.211482 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.211641 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.212930 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-ssh-key-openstack-edpm-ipam\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.223621 4767 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-clrfr\" (UniqueName: \"kubernetes.io/projected/485ff095-77ad-4166-af4c-1b900e3d2c4a-kube-api-access-clrfr\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.291422 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.819241 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv"] Jan 28 19:06:53 crc kubenswrapper[4767]: I0128 19:06:53.880495 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" event={"ID":"485ff095-77ad-4166-af4c-1b900e3d2c4a","Type":"ContainerStarted","Data":"411c1625128a02541dc023b9e8b6d1c7b4a678f0a656b4ac78ddb7b38a9b36d3"} Jan 28 19:06:54 crc kubenswrapper[4767]: I0128 19:06:54.891262 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" event={"ID":"485ff095-77ad-4166-af4c-1b900e3d2c4a","Type":"ContainerStarted","Data":"ee5f2e31481af1deda45f98993cc09d2153f05cae7e0f14636032cc62074f236"} Jan 28 19:06:54 crc kubenswrapper[4767]: I0128 19:06:54.924704 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" podStartSLOduration=2.400507248 podStartE2EDuration="2.924681725s" podCreationTimestamp="2026-01-28 19:06:52 +0000 UTC" firstStartedPulling="2026-01-28 19:06:53.8217283 +0000 UTC m=+2219.785911174" lastFinishedPulling="2026-01-28 19:06:54.345902767 +0000 UTC m=+2220.310085651" observedRunningTime="2026-01-28 19:06:54.914957248 +0000 UTC m=+2220.879140142" watchObservedRunningTime="2026-01-28 19:06:54.924681725 +0000 UTC m=+2220.888864599" Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.133473 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xb4rs"] Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.136698 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.146600 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xb4rs"] Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.240255 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0d284ff-941b-40da-94ad-021c99de2be9-utilities\") pod \"community-operators-xb4rs\" (UID: \"c0d284ff-941b-40da-94ad-021c99de2be9\") " pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.241009 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vgmq\" (UniqueName: \"kubernetes.io/projected/c0d284ff-941b-40da-94ad-021c99de2be9-kube-api-access-4vgmq\") pod \"community-operators-xb4rs\" (UID: \"c0d284ff-941b-40da-94ad-021c99de2be9\") " pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.241087 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0d284ff-941b-40da-94ad-021c99de2be9-catalog-content\") pod \"community-operators-xb4rs\" (UID: \"c0d284ff-941b-40da-94ad-021c99de2be9\") " pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.343351 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vgmq\" (UniqueName: \"kubernetes.io/projected/c0d284ff-941b-40da-94ad-021c99de2be9-kube-api-access-4vgmq\") pod \"community-operators-xb4rs\" (UID: \"c0d284ff-941b-40da-94ad-021c99de2be9\") " pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.343453 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0d284ff-941b-40da-94ad-021c99de2be9-catalog-content\") pod \"community-operators-xb4rs\" (UID: \"c0d284ff-941b-40da-94ad-021c99de2be9\") " pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.343510 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0d284ff-941b-40da-94ad-021c99de2be9-utilities\") pod \"community-operators-xb4rs\" (UID: \"c0d284ff-941b-40da-94ad-021c99de2be9\") " pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.344146 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0d284ff-941b-40da-94ad-021c99de2be9-utilities\") pod \"community-operators-xb4rs\" (UID: \"c0d284ff-941b-40da-94ad-021c99de2be9\") " pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.344697 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0d284ff-941b-40da-94ad-021c99de2be9-catalog-content\") pod \"community-operators-xb4rs\" (UID: \"c0d284ff-941b-40da-94ad-021c99de2be9\") " pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.366471 4767 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4vgmq\" (UniqueName: \"kubernetes.io/projected/c0d284ff-941b-40da-94ad-021c99de2be9-kube-api-access-4vgmq\") pod \"community-operators-xb4rs\" (UID: \"c0d284ff-941b-40da-94ad-021c99de2be9\") " pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:11 crc kubenswrapper[4767]: I0128 19:07:11.457639 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:12 crc kubenswrapper[4767]: I0128 19:07:12.141635 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xb4rs"] Jan 28 19:07:13 crc kubenswrapper[4767]: I0128 19:07:13.084638 4767 generic.go:334] "Generic (PLEG): container finished" podID="c0d284ff-941b-40da-94ad-021c99de2be9" containerID="0e429d7c1b70c4c1857ed696310b3a552b16798c84c0b05dcf954578453229a5" exitCode=0 Jan 28 19:07:13 crc kubenswrapper[4767]: I0128 19:07:13.084791 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xb4rs" event={"ID":"c0d284ff-941b-40da-94ad-021c99de2be9","Type":"ContainerDied","Data":"0e429d7c1b70c4c1857ed696310b3a552b16798c84c0b05dcf954578453229a5"} Jan 28 19:07:13 crc kubenswrapper[4767]: I0128 19:07:13.085099 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xb4rs" event={"ID":"c0d284ff-941b-40da-94ad-021c99de2be9","Type":"ContainerStarted","Data":"52f19325ea5f55106b15a22b203095d48d1e09027c9231fbf8ef345f6606dd26"} Jan 28 19:07:15 crc kubenswrapper[4767]: I0128 19:07:15.107645 4767 generic.go:334] "Generic (PLEG): container finished" podID="c0d284ff-941b-40da-94ad-021c99de2be9" containerID="23c307c469afda825e85216128c54553ab5f30f2440db236f6d816b8d30ef27a" exitCode=0 Jan 28 19:07:15 crc kubenswrapper[4767]: I0128 19:07:15.107714 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xb4rs" event={"ID":"c0d284ff-941b-40da-94ad-021c99de2be9","Type":"ContainerDied","Data":"23c307c469afda825e85216128c54553ab5f30f2440db236f6d816b8d30ef27a"} Jan 28 19:07:15 crc kubenswrapper[4767]: I0128 19:07:15.455217 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:07:15 crc kubenswrapper[4767]: I0128 19:07:15.455770 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:07:16 crc kubenswrapper[4767]: I0128 19:07:16.127677 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xb4rs" event={"ID":"c0d284ff-941b-40da-94ad-021c99de2be9","Type":"ContainerStarted","Data":"839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4"} Jan 28 19:07:21 crc kubenswrapper[4767]: I0128 19:07:21.458150 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:21 crc kubenswrapper[4767]: I0128 19:07:21.458781 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:21 crc kubenswrapper[4767]: I0128 19:07:21.521113 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:21 crc kubenswrapper[4767]: I0128 19:07:21.543504 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xb4rs" podStartSLOduration=8.100260183 podStartE2EDuration="10.543483627s" podCreationTimestamp="2026-01-28 19:07:11 +0000 UTC" firstStartedPulling="2026-01-28 19:07:13.087283919 +0000 UTC m=+2239.051466793" lastFinishedPulling="2026-01-28 19:07:15.530507343 +0000 UTC m=+2241.494690237" observedRunningTime="2026-01-28 19:07:16.155612921 +0000 UTC m=+2242.119795795" watchObservedRunningTime="2026-01-28 19:07:21.543483627 +0000 UTC m=+2247.507666501" Jan 28 19:07:22 crc kubenswrapper[4767]: I0128 19:07:22.230877 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:22 crc kubenswrapper[4767]: I0128 19:07:22.291534 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xb4rs"] Jan 28 19:07:24 crc kubenswrapper[4767]: I0128 19:07:24.201908 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xb4rs" podUID="c0d284ff-941b-40da-94ad-021c99de2be9" containerName="registry-server" containerID="cri-o://839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4" gracePeriod=2 Jan 28 19:07:24 crc kubenswrapper[4767]: I0128 19:07:24.693887 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:24 crc kubenswrapper[4767]: I0128 19:07:24.771411 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vgmq\" (UniqueName: \"kubernetes.io/projected/c0d284ff-941b-40da-94ad-021c99de2be9-kube-api-access-4vgmq\") pod \"c0d284ff-941b-40da-94ad-021c99de2be9\" (UID: \"c0d284ff-941b-40da-94ad-021c99de2be9\") " Jan 28 19:07:24 crc kubenswrapper[4767]: I0128 19:07:24.771764 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0d284ff-941b-40da-94ad-021c99de2be9-catalog-content\") pod \"c0d284ff-941b-40da-94ad-021c99de2be9\" (UID: \"c0d284ff-941b-40da-94ad-021c99de2be9\") " Jan 28 19:07:24 crc kubenswrapper[4767]: I0128 19:07:24.771935 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0d284ff-941b-40da-94ad-021c99de2be9-utilities\") pod \"c0d284ff-941b-40da-94ad-021c99de2be9\" (UID: \"c0d284ff-941b-40da-94ad-021c99de2be9\") " Jan 28 19:07:24 crc kubenswrapper[4767]: I0128 19:07:24.773161 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0d284ff-941b-40da-94ad-021c99de2be9-utilities" (OuterVolumeSpecName: "utilities") pod "c0d284ff-941b-40da-94ad-021c99de2be9" (UID: "c0d284ff-941b-40da-94ad-021c99de2be9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:07:24 crc kubenswrapper[4767]: I0128 19:07:24.779969 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0d284ff-941b-40da-94ad-021c99de2be9-kube-api-access-4vgmq" (OuterVolumeSpecName: "kube-api-access-4vgmq") pod "c0d284ff-941b-40da-94ad-021c99de2be9" (UID: "c0d284ff-941b-40da-94ad-021c99de2be9"). InnerVolumeSpecName "kube-api-access-4vgmq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:07:24 crc kubenswrapper[4767]: I0128 19:07:24.833383 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c0d284ff-941b-40da-94ad-021c99de2be9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c0d284ff-941b-40da-94ad-021c99de2be9" (UID: "c0d284ff-941b-40da-94ad-021c99de2be9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:07:24 crc kubenswrapper[4767]: I0128 19:07:24.875642 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c0d284ff-941b-40da-94ad-021c99de2be9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:07:24 crc kubenswrapper[4767]: I0128 19:07:24.875710 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c0d284ff-941b-40da-94ad-021c99de2be9-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:07:24 crc kubenswrapper[4767]: I0128 19:07:24.875724 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vgmq\" (UniqueName: \"kubernetes.io/projected/c0d284ff-941b-40da-94ad-021c99de2be9-kube-api-access-4vgmq\") on node \"crc\" DevicePath \"\"" Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.216757 4767 generic.go:334] "Generic (PLEG): container finished" podID="c0d284ff-941b-40da-94ad-021c99de2be9" containerID="839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4" exitCode=0 Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.216819 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xb4rs" event={"ID":"c0d284ff-941b-40da-94ad-021c99de2be9","Type":"ContainerDied","Data":"839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4"} Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.216830 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-xb4rs" Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.216860 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xb4rs" event={"ID":"c0d284ff-941b-40da-94ad-021c99de2be9","Type":"ContainerDied","Data":"52f19325ea5f55106b15a22b203095d48d1e09027c9231fbf8ef345f6606dd26"} Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.216886 4767 scope.go:117] "RemoveContainer" containerID="839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4" Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.243287 4767 scope.go:117] "RemoveContainer" containerID="23c307c469afda825e85216128c54553ab5f30f2440db236f6d816b8d30ef27a" Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.263922 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xb4rs"] Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.276514 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xb4rs"] Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.300314 4767 scope.go:117] "RemoveContainer" containerID="0e429d7c1b70c4c1857ed696310b3a552b16798c84c0b05dcf954578453229a5" Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.335175 4767 scope.go:117] "RemoveContainer" containerID="839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4" Jan 28 19:07:25 crc kubenswrapper[4767]: E0128 19:07:25.336780 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4\": container with ID starting with 839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4 not found: ID does not exist" containerID="839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4" Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.336854 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4"} err="failed to get container status \"839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4\": rpc error: code = NotFound desc = could not find container \"839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4\": container with ID starting with 839c5743d9b4c00d731f52950321203dbc1de7d066d126a84cfae46afab0e2d4 not found: ID does not exist" Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.336897 4767 scope.go:117] "RemoveContainer" containerID="23c307c469afda825e85216128c54553ab5f30f2440db236f6d816b8d30ef27a" Jan 28 19:07:25 crc kubenswrapper[4767]: E0128 19:07:25.337582 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"23c307c469afda825e85216128c54553ab5f30f2440db236f6d816b8d30ef27a\": container with ID starting with 23c307c469afda825e85216128c54553ab5f30f2440db236f6d816b8d30ef27a not found: ID does not exist" containerID="23c307c469afda825e85216128c54553ab5f30f2440db236f6d816b8d30ef27a" Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.337618 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"23c307c469afda825e85216128c54553ab5f30f2440db236f6d816b8d30ef27a"} err="failed to get container status \"23c307c469afda825e85216128c54553ab5f30f2440db236f6d816b8d30ef27a\": rpc error: code = NotFound desc = could not find 
container \"23c307c469afda825e85216128c54553ab5f30f2440db236f6d816b8d30ef27a\": container with ID starting with 23c307c469afda825e85216128c54553ab5f30f2440db236f6d816b8d30ef27a not found: ID does not exist" Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.337639 4767 scope.go:117] "RemoveContainer" containerID="0e429d7c1b70c4c1857ed696310b3a552b16798c84c0b05dcf954578453229a5" Jan 28 19:07:25 crc kubenswrapper[4767]: E0128 19:07:25.338096 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e429d7c1b70c4c1857ed696310b3a552b16798c84c0b05dcf954578453229a5\": container with ID starting with 0e429d7c1b70c4c1857ed696310b3a552b16798c84c0b05dcf954578453229a5 not found: ID does not exist" containerID="0e429d7c1b70c4c1857ed696310b3a552b16798c84c0b05dcf954578453229a5" Jan 28 19:07:25 crc kubenswrapper[4767]: I0128 19:07:25.338130 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e429d7c1b70c4c1857ed696310b3a552b16798c84c0b05dcf954578453229a5"} err="failed to get container status \"0e429d7c1b70c4c1857ed696310b3a552b16798c84c0b05dcf954578453229a5\": rpc error: code = NotFound desc = could not find container \"0e429d7c1b70c4c1857ed696310b3a552b16798c84c0b05dcf954578453229a5\": container with ID starting with 0e429d7c1b70c4c1857ed696310b3a552b16798c84c0b05dcf954578453229a5 not found: ID does not exist" Jan 28 19:07:26 crc kubenswrapper[4767]: I0128 19:07:26.808007 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0d284ff-941b-40da-94ad-021c99de2be9" path="/var/lib/kubelet/pods/c0d284ff-941b-40da-94ad-021c99de2be9/volumes" Jan 28 19:07:41 crc kubenswrapper[4767]: I0128 19:07:41.373523 4767 generic.go:334] "Generic (PLEG): container finished" podID="485ff095-77ad-4166-af4c-1b900e3d2c4a" containerID="ee5f2e31481af1deda45f98993cc09d2153f05cae7e0f14636032cc62074f236" exitCode=0 Jan 28 19:07:41 crc kubenswrapper[4767]: I0128 19:07:41.373610 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" event={"ID":"485ff095-77ad-4166-af4c-1b900e3d2c4a","Type":"ContainerDied","Data":"ee5f2e31481af1deda45f98993cc09d2153f05cae7e0f14636032cc62074f236"} Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.866316 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.942674 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-nova-metadata-neutron-config-0\") pod \"485ff095-77ad-4166-af4c-1b900e3d2c4a\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.942763 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-neutron-metadata-combined-ca-bundle\") pod \"485ff095-77ad-4166-af4c-1b900e3d2c4a\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.942865 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-clrfr\" (UniqueName: \"kubernetes.io/projected/485ff095-77ad-4166-af4c-1b900e3d2c4a-kube-api-access-clrfr\") pod \"485ff095-77ad-4166-af4c-1b900e3d2c4a\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.943009 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-inventory\") pod \"485ff095-77ad-4166-af4c-1b900e3d2c4a\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.943091 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-neutron-ovn-metadata-agent-neutron-config-0\") pod \"485ff095-77ad-4166-af4c-1b900e3d2c4a\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.943227 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-ssh-key-openstack-edpm-ipam\") pod \"485ff095-77ad-4166-af4c-1b900e3d2c4a\" (UID: \"485ff095-77ad-4166-af4c-1b900e3d2c4a\") " Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.950586 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "485ff095-77ad-4166-af4c-1b900e3d2c4a" (UID: "485ff095-77ad-4166-af4c-1b900e3d2c4a"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.951413 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/485ff095-77ad-4166-af4c-1b900e3d2c4a-kube-api-access-clrfr" (OuterVolumeSpecName: "kube-api-access-clrfr") pod "485ff095-77ad-4166-af4c-1b900e3d2c4a" (UID: "485ff095-77ad-4166-af4c-1b900e3d2c4a"). InnerVolumeSpecName "kube-api-access-clrfr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.974689 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-inventory" (OuterVolumeSpecName: "inventory") pod "485ff095-77ad-4166-af4c-1b900e3d2c4a" (UID: "485ff095-77ad-4166-af4c-1b900e3d2c4a"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.976908 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "485ff095-77ad-4166-af4c-1b900e3d2c4a" (UID: "485ff095-77ad-4166-af4c-1b900e3d2c4a"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.983810 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "485ff095-77ad-4166-af4c-1b900e3d2c4a" (UID: "485ff095-77ad-4166-af4c-1b900e3d2c4a"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:07:42 crc kubenswrapper[4767]: I0128 19:07:42.988544 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "485ff095-77ad-4166-af4c-1b900e3d2c4a" (UID: "485ff095-77ad-4166-af4c-1b900e3d2c4a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.045858 4767 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.045960 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.045979 4767 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.045993 4767 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.046007 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-clrfr\" (UniqueName: \"kubernetes.io/projected/485ff095-77ad-4166-af4c-1b900e3d2c4a-kube-api-access-clrfr\") on node \"crc\" DevicePath \"\"" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.046021 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/485ff095-77ad-4166-af4c-1b900e3d2c4a-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.406384 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" event={"ID":"485ff095-77ad-4166-af4c-1b900e3d2c4a","Type":"ContainerDied","Data":"411c1625128a02541dc023b9e8b6d1c7b4a678f0a656b4ac78ddb7b38a9b36d3"} Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.406440 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="411c1625128a02541dc023b9e8b6d1c7b4a678f0a656b4ac78ddb7b38a9b36d3" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.406531 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.500141 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64"] Jan 28 19:07:43 crc kubenswrapper[4767]: E0128 19:07:43.500999 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0d284ff-941b-40da-94ad-021c99de2be9" containerName="registry-server" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.501143 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0d284ff-941b-40da-94ad-021c99de2be9" containerName="registry-server" Jan 28 19:07:43 crc kubenswrapper[4767]: E0128 19:07:43.501267 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0d284ff-941b-40da-94ad-021c99de2be9" containerName="extract-utilities" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.501357 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0d284ff-941b-40da-94ad-021c99de2be9" containerName="extract-utilities" Jan 28 19:07:43 crc kubenswrapper[4767]: E0128 19:07:43.501496 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="485ff095-77ad-4166-af4c-1b900e3d2c4a" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.501586 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="485ff095-77ad-4166-af4c-1b900e3d2c4a" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 28 19:07:43 crc kubenswrapper[4767]: E0128 19:07:43.501683 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0d284ff-941b-40da-94ad-021c99de2be9" containerName="extract-content" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.501777 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0d284ff-941b-40da-94ad-021c99de2be9" containerName="extract-content" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.502054 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="485ff095-77ad-4166-af4c-1b900e3d2c4a" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.502134 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0d284ff-941b-40da-94ad-021c99de2be9" containerName="registry-server" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.502986 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.507831 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.508230 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.508615 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.508836 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.509088 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.521032 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64"] Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.659331 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.659477 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wrrdk\" (UniqueName: \"kubernetes.io/projected/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-kube-api-access-wrrdk\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.659531 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.659779 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.659821 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.762301 4767 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-wrrdk\" (UniqueName: \"kubernetes.io/projected/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-kube-api-access-wrrdk\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.762406 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.762544 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.762569 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.762595 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.769853 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.770845 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.771135 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.774129 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" 
(UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-ssh-key-openstack-edpm-ipam\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.786326 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wrrdk\" (UniqueName: \"kubernetes.io/projected/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-kube-api-access-wrrdk\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-nkz64\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:43 crc kubenswrapper[4767]: I0128 19:07:43.823283 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:07:44 crc kubenswrapper[4767]: I0128 19:07:44.387270 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64"] Jan 28 19:07:44 crc kubenswrapper[4767]: I0128 19:07:44.416839 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" event={"ID":"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a","Type":"ContainerStarted","Data":"7f3d1caa5913b7c743d0c606ff250b541520f9a7da201d5e8a5de5264a52d98d"} Jan 28 19:07:45 crc kubenswrapper[4767]: I0128 19:07:45.428992 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" event={"ID":"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a","Type":"ContainerStarted","Data":"5f787434a8967b8b8ce0dca7d4fc92374fe190da93fa051c25162bdd13a9b2ab"} Jan 28 19:07:45 crc kubenswrapper[4767]: I0128 19:07:45.455657 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" podStartSLOduration=1.8287548679999999 podStartE2EDuration="2.455633673s" podCreationTimestamp="2026-01-28 19:07:43 +0000 UTC" firstStartedPulling="2026-01-28 19:07:44.399832858 +0000 UTC m=+2270.364015732" lastFinishedPulling="2026-01-28 19:07:45.026711643 +0000 UTC m=+2270.990894537" observedRunningTime="2026-01-28 19:07:45.44663439 +0000 UTC m=+2271.410817274" watchObservedRunningTime="2026-01-28 19:07:45.455633673 +0000 UTC m=+2271.419816557" Jan 28 19:07:45 crc kubenswrapper[4767]: I0128 19:07:45.456467 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:07:45 crc kubenswrapper[4767]: I0128 19:07:45.456726 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:08:15 crc kubenswrapper[4767]: I0128 19:08:15.455175 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:08:15 crc 
kubenswrapper[4767]: I0128 19:08:15.456148 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:08:15 crc kubenswrapper[4767]: I0128 19:08:15.456225 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 19:08:15 crc kubenswrapper[4767]: I0128 19:08:15.457410 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 19:08:15 crc kubenswrapper[4767]: I0128 19:08:15.457484 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" gracePeriod=600 Jan 28 19:08:15 crc kubenswrapper[4767]: E0128 19:08:15.597003 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:08:15 crc kubenswrapper[4767]: I0128 19:08:15.777995 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" exitCode=0 Jan 28 19:08:15 crc kubenswrapper[4767]: I0128 19:08:15.778078 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e"} Jan 28 19:08:15 crc kubenswrapper[4767]: I0128 19:08:15.778191 4767 scope.go:117] "RemoveContainer" containerID="967b8a4723891c56d9bbc8957571ded4ea00629b76b3b712cd2b57f9a1382c6f" Jan 28 19:08:15 crc kubenswrapper[4767]: I0128 19:08:15.780347 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:08:15 crc kubenswrapper[4767]: E0128 19:08:15.794634 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:08:30 crc kubenswrapper[4767]: I0128 19:08:30.797288 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:08:30 crc 
kubenswrapper[4767]: E0128 19:08:30.797939 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:08:41 crc kubenswrapper[4767]: I0128 19:08:41.796446 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:08:41 crc kubenswrapper[4767]: E0128 19:08:41.797489 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:08:56 crc kubenswrapper[4767]: I0128 19:08:56.796070 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:08:56 crc kubenswrapper[4767]: E0128 19:08:56.797412 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:09:10 crc kubenswrapper[4767]: I0128 19:09:10.796608 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:09:10 crc kubenswrapper[4767]: E0128 19:09:10.797677 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:09:25 crc kubenswrapper[4767]: I0128 19:09:25.796536 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:09:25 crc kubenswrapper[4767]: E0128 19:09:25.797698 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:09:39 crc kubenswrapper[4767]: I0128 19:09:39.796496 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:09:39 crc kubenswrapper[4767]: E0128 19:09:39.797421 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:09:50 crc kubenswrapper[4767]: I0128 19:09:50.796087 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:09:50 crc kubenswrapper[4767]: E0128 19:09:50.796902 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:10:04 crc kubenswrapper[4767]: I0128 19:10:04.804048 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:10:04 crc kubenswrapper[4767]: E0128 19:10:04.804941 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:10:20 crc kubenswrapper[4767]: I0128 19:10:20.109276 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:10:20 crc kubenswrapper[4767]: E0128 19:10:20.110276 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:10:35 crc kubenswrapper[4767]: I0128 19:10:35.796093 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:10:35 crc kubenswrapper[4767]: E0128 19:10:35.796894 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.360236 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-dfpfb"] Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.366309 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.371733 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dfpfb"] Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.506695 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-utilities\") pod \"redhat-marketplace-dfpfb\" (UID: \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\") " pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.507259 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-catalog-content\") pod \"redhat-marketplace-dfpfb\" (UID: \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\") " pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.507557 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d6sqk\" (UniqueName: \"kubernetes.io/projected/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-kube-api-access-d6sqk\") pod \"redhat-marketplace-dfpfb\" (UID: \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\") " pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.609761 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-utilities\") pod \"redhat-marketplace-dfpfb\" (UID: \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\") " pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.609890 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-catalog-content\") pod \"redhat-marketplace-dfpfb\" (UID: \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\") " pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.609951 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d6sqk\" (UniqueName: \"kubernetes.io/projected/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-kube-api-access-d6sqk\") pod \"redhat-marketplace-dfpfb\" (UID: \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\") " pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.610866 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-utilities\") pod \"redhat-marketplace-dfpfb\" (UID: \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\") " pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.610866 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-catalog-content\") pod \"redhat-marketplace-dfpfb\" (UID: \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\") " pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.641090 4767 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-d6sqk\" (UniqueName: \"kubernetes.io/projected/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-kube-api-access-d6sqk\") pod \"redhat-marketplace-dfpfb\" (UID: \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\") " pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:44 crc kubenswrapper[4767]: I0128 19:10:44.694848 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:45 crc kubenswrapper[4767]: I0128 19:10:45.376977 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-dfpfb"] Jan 28 19:10:46 crc kubenswrapper[4767]: I0128 19:10:46.386762 4767 generic.go:334] "Generic (PLEG): container finished" podID="1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" containerID="06db0ac0ede4e9100cccf07af2f343c708f49532585fa05e7675773d07a13f0b" exitCode=0 Jan 28 19:10:46 crc kubenswrapper[4767]: I0128 19:10:46.386835 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dfpfb" event={"ID":"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0","Type":"ContainerDied","Data":"06db0ac0ede4e9100cccf07af2f343c708f49532585fa05e7675773d07a13f0b"} Jan 28 19:10:46 crc kubenswrapper[4767]: I0128 19:10:46.388150 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dfpfb" event={"ID":"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0","Type":"ContainerStarted","Data":"79fe9ba6ba383882cc44ef00f13366113390f190e20b341cdf6e707f651183a6"} Jan 28 19:10:46 crc kubenswrapper[4767]: I0128 19:10:46.390721 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 19:10:47 crc kubenswrapper[4767]: I0128 19:10:47.398969 4767 generic.go:334] "Generic (PLEG): container finished" podID="1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" containerID="c1f2ad92170025f99748d5482d783b967fecdd3916099eb87f0e6f2364dbe23b" exitCode=0 Jan 28 19:10:47 crc kubenswrapper[4767]: I0128 19:10:47.399158 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dfpfb" event={"ID":"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0","Type":"ContainerDied","Data":"c1f2ad92170025f99748d5482d783b967fecdd3916099eb87f0e6f2364dbe23b"} Jan 28 19:10:48 crc kubenswrapper[4767]: I0128 19:10:48.423244 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dfpfb" event={"ID":"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0","Type":"ContainerStarted","Data":"942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21"} Jan 28 19:10:48 crc kubenswrapper[4767]: I0128 19:10:48.456554 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-dfpfb" podStartSLOduration=3.023385887 podStartE2EDuration="4.456523185s" podCreationTimestamp="2026-01-28 19:10:44 +0000 UTC" firstStartedPulling="2026-01-28 19:10:46.39045195 +0000 UTC m=+2452.354634814" lastFinishedPulling="2026-01-28 19:10:47.823589238 +0000 UTC m=+2453.787772112" observedRunningTime="2026-01-28 19:10:48.444183714 +0000 UTC m=+2454.408366598" watchObservedRunningTime="2026-01-28 19:10:48.456523185 +0000 UTC m=+2454.420706049" Jan 28 19:10:49 crc kubenswrapper[4767]: I0128 19:10:49.796250 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:10:49 crc kubenswrapper[4767]: E0128 19:10:49.796612 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:10:54 crc kubenswrapper[4767]: I0128 19:10:54.695617 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:54 crc kubenswrapper[4767]: I0128 19:10:54.698285 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:54 crc kubenswrapper[4767]: I0128 19:10:54.751347 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:55 crc kubenswrapper[4767]: I0128 19:10:55.558023 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:55 crc kubenswrapper[4767]: I0128 19:10:55.641142 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dfpfb"] Jan 28 19:10:57 crc kubenswrapper[4767]: I0128 19:10:57.520886 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-dfpfb" podUID="1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" containerName="registry-server" containerID="cri-o://942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21" gracePeriod=2 Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.127859 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.224268 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-utilities\") pod \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\" (UID: \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\") " Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.224676 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6sqk\" (UniqueName: \"kubernetes.io/projected/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-kube-api-access-d6sqk\") pod \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\" (UID: \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\") " Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.224752 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-catalog-content\") pod \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\" (UID: \"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0\") " Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.225413 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-utilities" (OuterVolumeSpecName: "utilities") pod "1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" (UID: "1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.232986 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-kube-api-access-d6sqk" (OuterVolumeSpecName: "kube-api-access-d6sqk") pod "1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" (UID: "1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0"). InnerVolumeSpecName "kube-api-access-d6sqk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.246829 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" (UID: "1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.328354 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.328423 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6sqk\" (UniqueName: \"kubernetes.io/projected/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-kube-api-access-d6sqk\") on node \"crc\" DevicePath \"\"" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.328439 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.534791 4767 generic.go:334] "Generic (PLEG): container finished" podID="1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" containerID="942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21" exitCode=0 Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.534867 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dfpfb" event={"ID":"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0","Type":"ContainerDied","Data":"942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21"} Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.534887 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-dfpfb" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.534917 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-dfpfb" event={"ID":"1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0","Type":"ContainerDied","Data":"79fe9ba6ba383882cc44ef00f13366113390f190e20b341cdf6e707f651183a6"} Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.534948 4767 scope.go:117] "RemoveContainer" containerID="942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.558572 4767 scope.go:117] "RemoveContainer" containerID="c1f2ad92170025f99748d5482d783b967fecdd3916099eb87f0e6f2364dbe23b" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.579933 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-dfpfb"] Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.591040 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-dfpfb"] Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.604741 4767 scope.go:117] "RemoveContainer" containerID="06db0ac0ede4e9100cccf07af2f343c708f49532585fa05e7675773d07a13f0b" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.641602 4767 scope.go:117] "RemoveContainer" containerID="942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21" Jan 28 19:10:58 crc kubenswrapper[4767]: E0128 19:10:58.642411 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21\": container with ID starting with 942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21 not found: ID does not exist" containerID="942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.642515 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21"} err="failed to get container status \"942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21\": rpc error: code = NotFound desc = could not find container \"942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21\": container with ID starting with 942f1ec897dce70fde8a9f851a17fa5824d4595d0b1e572977546f682add5a21 not found: ID does not exist" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.642551 4767 scope.go:117] "RemoveContainer" containerID="c1f2ad92170025f99748d5482d783b967fecdd3916099eb87f0e6f2364dbe23b" Jan 28 19:10:58 crc kubenswrapper[4767]: E0128 19:10:58.643148 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1f2ad92170025f99748d5482d783b967fecdd3916099eb87f0e6f2364dbe23b\": container with ID starting with c1f2ad92170025f99748d5482d783b967fecdd3916099eb87f0e6f2364dbe23b not found: ID does not exist" containerID="c1f2ad92170025f99748d5482d783b967fecdd3916099eb87f0e6f2364dbe23b" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.643191 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1f2ad92170025f99748d5482d783b967fecdd3916099eb87f0e6f2364dbe23b"} err="failed to get container status \"c1f2ad92170025f99748d5482d783b967fecdd3916099eb87f0e6f2364dbe23b\": rpc error: code = NotFound desc = could not find 
container \"c1f2ad92170025f99748d5482d783b967fecdd3916099eb87f0e6f2364dbe23b\": container with ID starting with c1f2ad92170025f99748d5482d783b967fecdd3916099eb87f0e6f2364dbe23b not found: ID does not exist" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.643239 4767 scope.go:117] "RemoveContainer" containerID="06db0ac0ede4e9100cccf07af2f343c708f49532585fa05e7675773d07a13f0b" Jan 28 19:10:58 crc kubenswrapper[4767]: E0128 19:10:58.643543 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06db0ac0ede4e9100cccf07af2f343c708f49532585fa05e7675773d07a13f0b\": container with ID starting with 06db0ac0ede4e9100cccf07af2f343c708f49532585fa05e7675773d07a13f0b not found: ID does not exist" containerID="06db0ac0ede4e9100cccf07af2f343c708f49532585fa05e7675773d07a13f0b" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.643573 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06db0ac0ede4e9100cccf07af2f343c708f49532585fa05e7675773d07a13f0b"} err="failed to get container status \"06db0ac0ede4e9100cccf07af2f343c708f49532585fa05e7675773d07a13f0b\": rpc error: code = NotFound desc = could not find container \"06db0ac0ede4e9100cccf07af2f343c708f49532585fa05e7675773d07a13f0b\": container with ID starting with 06db0ac0ede4e9100cccf07af2f343c708f49532585fa05e7675773d07a13f0b not found: ID does not exist" Jan 28 19:10:58 crc kubenswrapper[4767]: I0128 19:10:58.808177 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" path="/var/lib/kubelet/pods/1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0/volumes" Jan 28 19:11:01 crc kubenswrapper[4767]: I0128 19:11:01.796785 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:11:01 crc kubenswrapper[4767]: E0128 19:11:01.797673 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:11:07 crc kubenswrapper[4767]: I0128 19:11:07.846993 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lpj4m"] Jan 28 19:11:07 crc kubenswrapper[4767]: E0128 19:11:07.848307 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" containerName="extract-utilities" Jan 28 19:11:07 crc kubenswrapper[4767]: I0128 19:11:07.848324 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" containerName="extract-utilities" Jan 28 19:11:07 crc kubenswrapper[4767]: E0128 19:11:07.848333 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" containerName="extract-content" Jan 28 19:11:07 crc kubenswrapper[4767]: I0128 19:11:07.848339 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" containerName="extract-content" Jan 28 19:11:07 crc kubenswrapper[4767]: E0128 19:11:07.848387 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" containerName="registry-server" Jan 28 19:11:07 
crc kubenswrapper[4767]: I0128 19:11:07.848394 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" containerName="registry-server" Jan 28 19:11:07 crc kubenswrapper[4767]: I0128 19:11:07.848590 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="1f0af3e8-b39e-4889-b8a6-aacbf7fe06a0" containerName="registry-server" Jan 28 19:11:07 crc kubenswrapper[4767]: I0128 19:11:07.850788 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:07 crc kubenswrapper[4767]: I0128 19:11:07.867812 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lpj4m"] Jan 28 19:11:07 crc kubenswrapper[4767]: I0128 19:11:07.962423 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/731b3090-7969-4da5-bcf3-e011f64563ee-utilities\") pod \"certified-operators-lpj4m\" (UID: \"731b3090-7969-4da5-bcf3-e011f64563ee\") " pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:07 crc kubenswrapper[4767]: I0128 19:11:07.963023 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-thzvb\" (UniqueName: \"kubernetes.io/projected/731b3090-7969-4da5-bcf3-e011f64563ee-kube-api-access-thzvb\") pod \"certified-operators-lpj4m\" (UID: \"731b3090-7969-4da5-bcf3-e011f64563ee\") " pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:07 crc kubenswrapper[4767]: I0128 19:11:07.963188 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/731b3090-7969-4da5-bcf3-e011f64563ee-catalog-content\") pod \"certified-operators-lpj4m\" (UID: \"731b3090-7969-4da5-bcf3-e011f64563ee\") " pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:08 crc kubenswrapper[4767]: I0128 19:11:08.065816 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/731b3090-7969-4da5-bcf3-e011f64563ee-catalog-content\") pod \"certified-operators-lpj4m\" (UID: \"731b3090-7969-4da5-bcf3-e011f64563ee\") " pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:08 crc kubenswrapper[4767]: I0128 19:11:08.065921 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/731b3090-7969-4da5-bcf3-e011f64563ee-utilities\") pod \"certified-operators-lpj4m\" (UID: \"731b3090-7969-4da5-bcf3-e011f64563ee\") " pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:08 crc kubenswrapper[4767]: I0128 19:11:08.065981 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-thzvb\" (UniqueName: \"kubernetes.io/projected/731b3090-7969-4da5-bcf3-e011f64563ee-kube-api-access-thzvb\") pod \"certified-operators-lpj4m\" (UID: \"731b3090-7969-4da5-bcf3-e011f64563ee\") " pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:08 crc kubenswrapper[4767]: I0128 19:11:08.066946 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/731b3090-7969-4da5-bcf3-e011f64563ee-catalog-content\") pod \"certified-operators-lpj4m\" (UID: \"731b3090-7969-4da5-bcf3-e011f64563ee\") " 
pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:08 crc kubenswrapper[4767]: I0128 19:11:08.067191 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/731b3090-7969-4da5-bcf3-e011f64563ee-utilities\") pod \"certified-operators-lpj4m\" (UID: \"731b3090-7969-4da5-bcf3-e011f64563ee\") " pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:08 crc kubenswrapper[4767]: I0128 19:11:08.087903 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-thzvb\" (UniqueName: \"kubernetes.io/projected/731b3090-7969-4da5-bcf3-e011f64563ee-kube-api-access-thzvb\") pod \"certified-operators-lpj4m\" (UID: \"731b3090-7969-4da5-bcf3-e011f64563ee\") " pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:08 crc kubenswrapper[4767]: I0128 19:11:08.188497 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:08 crc kubenswrapper[4767]: W0128 19:11:08.729590 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod731b3090_7969_4da5_bcf3_e011f64563ee.slice/crio-55f0e03976f8620246a933bf3dd0552698f51e9d5b39633d548554e3f6753a45 WatchSource:0}: Error finding container 55f0e03976f8620246a933bf3dd0552698f51e9d5b39633d548554e3f6753a45: Status 404 returned error can't find the container with id 55f0e03976f8620246a933bf3dd0552698f51e9d5b39633d548554e3f6753a45 Jan 28 19:11:08 crc kubenswrapper[4767]: I0128 19:11:08.730137 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lpj4m"] Jan 28 19:11:09 crc kubenswrapper[4767]: I0128 19:11:09.654183 4767 generic.go:334] "Generic (PLEG): container finished" podID="731b3090-7969-4da5-bcf3-e011f64563ee" containerID="bbad50cc7fa641b4b80d756464099e49ec803b6b8bd6cb9ed2f0282562fbf292" exitCode=0 Jan 28 19:11:09 crc kubenswrapper[4767]: I0128 19:11:09.654329 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lpj4m" event={"ID":"731b3090-7969-4da5-bcf3-e011f64563ee","Type":"ContainerDied","Data":"bbad50cc7fa641b4b80d756464099e49ec803b6b8bd6cb9ed2f0282562fbf292"} Jan 28 19:11:09 crc kubenswrapper[4767]: I0128 19:11:09.654634 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lpj4m" event={"ID":"731b3090-7969-4da5-bcf3-e011f64563ee","Type":"ContainerStarted","Data":"55f0e03976f8620246a933bf3dd0552698f51e9d5b39633d548554e3f6753a45"} Jan 28 19:11:11 crc kubenswrapper[4767]: I0128 19:11:11.687448 4767 generic.go:334] "Generic (PLEG): container finished" podID="731b3090-7969-4da5-bcf3-e011f64563ee" containerID="14bfd75ad97dc82f2aad43309bb900422129ca95d877d856b5ebc5e41abb48d1" exitCode=0 Jan 28 19:11:11 crc kubenswrapper[4767]: I0128 19:11:11.687575 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lpj4m" event={"ID":"731b3090-7969-4da5-bcf3-e011f64563ee","Type":"ContainerDied","Data":"14bfd75ad97dc82f2aad43309bb900422129ca95d877d856b5ebc5e41abb48d1"} Jan 28 19:11:13 crc kubenswrapper[4767]: I0128 19:11:13.711985 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lpj4m" event={"ID":"731b3090-7969-4da5-bcf3-e011f64563ee","Type":"ContainerStarted","Data":"275efb34f1404ab1d958abb05d9b21244ffb5b64bfc49bd51aa9d8d3a7fbf5cf"} Jan 28 19:11:13 
Jan 28 19:11:13 crc kubenswrapper[4767]: I0128 19:11:13.744933 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lpj4m" podStartSLOduration=3.750292912 podStartE2EDuration="6.744909483s" podCreationTimestamp="2026-01-28 19:11:07 +0000 UTC" firstStartedPulling="2026-01-28 19:11:09.656452054 +0000 UTC m=+2475.620634928" lastFinishedPulling="2026-01-28 19:11:12.651068625 +0000 UTC m=+2478.615251499" observedRunningTime="2026-01-28 19:11:13.735581418 +0000 UTC m=+2479.699764332" watchObservedRunningTime="2026-01-28 19:11:13.744909483 +0000 UTC m=+2479.709092357"
Jan 28 19:11:15 crc kubenswrapper[4767]: I0128 19:11:15.796234 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e"
Jan 28 19:11:15 crc kubenswrapper[4767]: E0128 19:11:15.796969 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:11:18 crc kubenswrapper[4767]: I0128 19:11:18.188677 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lpj4m"
Jan 28 19:11:18 crc kubenswrapper[4767]: I0128 19:11:18.189980 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lpj4m"
Jan 28 19:11:18 crc kubenswrapper[4767]: I0128 19:11:18.242816 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lpj4m"
Jan 28 19:11:18 crc kubenswrapper[4767]: I0128 19:11:18.814615 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lpj4m"
Jan 28 19:11:18 crc kubenswrapper[4767]: I0128 19:11:18.886173 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lpj4m"]
Jan 28 19:11:20 crc kubenswrapper[4767]: I0128 19:11:20.789469 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lpj4m" podUID="731b3090-7969-4da5-bcf3-e011f64563ee" containerName="registry-server" containerID="cri-o://275efb34f1404ab1d958abb05d9b21244ffb5b64bfc49bd51aa9d8d3a7fbf5cf" gracePeriod=2
Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.284756 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lpj4m"
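The pod_startup_latency_tracker entry above is internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp (6.744909483s), and podStartSLOduration appears to be that figure minus the image-pull window lastFinishedPulling - firstStartedPulling (2.994616571s), giving 3.750292912. A small sketch reproducing the arithmetic from the timestamps quoted in the entry:

```go
package main

import (
	"fmt"
	"time"
)

// Timestamps copied verbatim from the pod_startup_latency_tracker entry above.
func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	parse := func(s string) time.Time {
		t, err := time.Parse(layout, s)
		if err != nil {
			panic(err)
		}
		return t
	}
	created := parse("2026-01-28 19:11:07 +0000 UTC")
	firstPull := parse("2026-01-28 19:11:09.656452054 +0000 UTC")
	lastPull := parse("2026-01-28 19:11:12.651068625 +0000 UTC")
	observed := parse("2026-01-28 19:11:13.744909483 +0000 UTC")

	e2e := observed.Sub(created)         // podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // E2E minus the image-pull window
	fmt.Println(e2e)                     // 6.744909483s
	fmt.Println(slo)                     // 3.750292912s
}
```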
Need to start a new one" pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.363954 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/731b3090-7969-4da5-bcf3-e011f64563ee-catalog-content\") pod \"731b3090-7969-4da5-bcf3-e011f64563ee\" (UID: \"731b3090-7969-4da5-bcf3-e011f64563ee\") " Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.364450 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/731b3090-7969-4da5-bcf3-e011f64563ee-utilities\") pod \"731b3090-7969-4da5-bcf3-e011f64563ee\" (UID: \"731b3090-7969-4da5-bcf3-e011f64563ee\") " Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.364628 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-thzvb\" (UniqueName: \"kubernetes.io/projected/731b3090-7969-4da5-bcf3-e011f64563ee-kube-api-access-thzvb\") pod \"731b3090-7969-4da5-bcf3-e011f64563ee\" (UID: \"731b3090-7969-4da5-bcf3-e011f64563ee\") " Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.365662 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/731b3090-7969-4da5-bcf3-e011f64563ee-utilities" (OuterVolumeSpecName: "utilities") pod "731b3090-7969-4da5-bcf3-e011f64563ee" (UID: "731b3090-7969-4da5-bcf3-e011f64563ee"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.372345 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/731b3090-7969-4da5-bcf3-e011f64563ee-kube-api-access-thzvb" (OuterVolumeSpecName: "kube-api-access-thzvb") pod "731b3090-7969-4da5-bcf3-e011f64563ee" (UID: "731b3090-7969-4da5-bcf3-e011f64563ee"). InnerVolumeSpecName "kube-api-access-thzvb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.423543 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/731b3090-7969-4da5-bcf3-e011f64563ee-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "731b3090-7969-4da5-bcf3-e011f64563ee" (UID: "731b3090-7969-4da5-bcf3-e011f64563ee"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.466770 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/731b3090-7969-4da5-bcf3-e011f64563ee-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.466823 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/731b3090-7969-4da5-bcf3-e011f64563ee-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.466837 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-thzvb\" (UniqueName: \"kubernetes.io/projected/731b3090-7969-4da5-bcf3-e011f64563ee-kube-api-access-thzvb\") on node \"crc\" DevicePath \"\"" Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.805107 4767 generic.go:334] "Generic (PLEG): container finished" podID="731b3090-7969-4da5-bcf3-e011f64563ee" containerID="275efb34f1404ab1d958abb05d9b21244ffb5b64bfc49bd51aa9d8d3a7fbf5cf" exitCode=0 Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.805154 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lpj4m" Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.805176 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lpj4m" event={"ID":"731b3090-7969-4da5-bcf3-e011f64563ee","Type":"ContainerDied","Data":"275efb34f1404ab1d958abb05d9b21244ffb5b64bfc49bd51aa9d8d3a7fbf5cf"} Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.805683 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lpj4m" event={"ID":"731b3090-7969-4da5-bcf3-e011f64563ee","Type":"ContainerDied","Data":"55f0e03976f8620246a933bf3dd0552698f51e9d5b39633d548554e3f6753a45"} Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.805705 4767 scope.go:117] "RemoveContainer" containerID="275efb34f1404ab1d958abb05d9b21244ffb5b64bfc49bd51aa9d8d3a7fbf5cf" Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.836848 4767 scope.go:117] "RemoveContainer" containerID="14bfd75ad97dc82f2aad43309bb900422129ca95d877d856b5ebc5e41abb48d1" Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.851061 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lpj4m"] Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.868107 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lpj4m"] Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.875752 4767 scope.go:117] "RemoveContainer" containerID="bbad50cc7fa641b4b80d756464099e49ec803b6b8bd6cb9ed2f0282562fbf292" Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.930767 4767 scope.go:117] "RemoveContainer" containerID="275efb34f1404ab1d958abb05d9b21244ffb5b64bfc49bd51aa9d8d3a7fbf5cf" Jan 28 19:11:21 crc kubenswrapper[4767]: E0128 19:11:21.931357 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"275efb34f1404ab1d958abb05d9b21244ffb5b64bfc49bd51aa9d8d3a7fbf5cf\": container with ID starting with 275efb34f1404ab1d958abb05d9b21244ffb5b64bfc49bd51aa9d8d3a7fbf5cf not found: ID does not exist" containerID="275efb34f1404ab1d958abb05d9b21244ffb5b64bfc49bd51aa9d8d3a7fbf5cf" Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.931403 
Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.931435 4767 scope.go:117] "RemoveContainer" containerID="14bfd75ad97dc82f2aad43309bb900422129ca95d877d856b5ebc5e41abb48d1"
Jan 28 19:11:21 crc kubenswrapper[4767]: E0128 19:11:21.931860 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14bfd75ad97dc82f2aad43309bb900422129ca95d877d856b5ebc5e41abb48d1\": container with ID starting with 14bfd75ad97dc82f2aad43309bb900422129ca95d877d856b5ebc5e41abb48d1 not found: ID does not exist" containerID="14bfd75ad97dc82f2aad43309bb900422129ca95d877d856b5ebc5e41abb48d1"
Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.931913 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14bfd75ad97dc82f2aad43309bb900422129ca95d877d856b5ebc5e41abb48d1"} err="failed to get container status \"14bfd75ad97dc82f2aad43309bb900422129ca95d877d856b5ebc5e41abb48d1\": rpc error: code = NotFound desc = could not find container \"14bfd75ad97dc82f2aad43309bb900422129ca95d877d856b5ebc5e41abb48d1\": container with ID starting with 14bfd75ad97dc82f2aad43309bb900422129ca95d877d856b5ebc5e41abb48d1 not found: ID does not exist"
Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.931950 4767 scope.go:117] "RemoveContainer" containerID="bbad50cc7fa641b4b80d756464099e49ec803b6b8bd6cb9ed2f0282562fbf292"
Jan 28 19:11:21 crc kubenswrapper[4767]: E0128 19:11:21.932523 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbad50cc7fa641b4b80d756464099e49ec803b6b8bd6cb9ed2f0282562fbf292\": container with ID starting with bbad50cc7fa641b4b80d756464099e49ec803b6b8bd6cb9ed2f0282562fbf292 not found: ID does not exist" containerID="bbad50cc7fa641b4b80d756464099e49ec803b6b8bd6cb9ed2f0282562fbf292"
Jan 28 19:11:21 crc kubenswrapper[4767]: I0128 19:11:21.932590 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbad50cc7fa641b4b80d756464099e49ec803b6b8bd6cb9ed2f0282562fbf292"} err="failed to get container status \"bbad50cc7fa641b4b80d756464099e49ec803b6b8bd6cb9ed2f0282562fbf292\": rpc error: code = NotFound desc = could not find container \"bbad50cc7fa641b4b80d756464099e49ec803b6b8bd6cb9ed2f0282562fbf292\": container with ID starting with bbad50cc7fa641b4b80d756464099e49ec803b6b8bd6cb9ed2f0282562fbf292 not found: ID does not exist"
Jan 28 19:11:22 crc kubenswrapper[4767]: I0128 19:11:22.812871 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="731b3090-7969-4da5-bcf3-e011f64563ee" path="/var/lib/kubelet/pods/731b3090-7969-4da5-bcf3-e011f64563ee/volumes"
Jan 28 19:11:28 crc kubenswrapper[4767]: I0128 19:11:28.796752 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e"
Jan 28 19:11:28 crc kubenswrapper[4767]: E0128 19:11:28.798086 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
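The NotFound errors above are the benign tail of pod deletion: by the time the kubelet retries RemoveContainer, CRI-O has already discarded the containers, so the status lookup fails and cleanup simply proceeds (the orphaned volumes dir goes away at 19:11:22). A sketch of that idempotent-delete pattern against a gRPC runtime, with removeFn as a hypothetical stand-in for the CRI call:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeIfPresent treats a gRPC NotFound from the runtime as success:
// for a deletion, "already gone" is the desired end state.
func removeIfPresent(id string, removeFn func(string) error) error {
	if err := removeFn(id); err != nil {
		if status.Code(err) == codes.NotFound {
			fmt.Printf("container %s already gone; nothing to do\n", id)
			return nil
		}
		return fmt.Errorf("failed to remove container %s: %w", id, err)
	}
	return nil
}

func main() {
	alreadyGone := func(id string) error {
		return status.Error(codes.NotFound, "could not find container "+id)
	}
	if err := removeIfPresent("275efb34f140", alreadyGone); err != nil {
		fmt.Println("unexpected:", err)
	}
}
```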
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:11:43 crc kubenswrapper[4767]: I0128 19:11:43.796478 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:11:43 crc kubenswrapper[4767]: E0128 19:11:43.797497 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:11:55 crc kubenswrapper[4767]: I0128 19:11:55.165773 4767 generic.go:334] "Generic (PLEG): container finished" podID="b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a" containerID="5f787434a8967b8b8ce0dca7d4fc92374fe190da93fa051c25162bdd13a9b2ab" exitCode=0 Jan 28 19:11:55 crc kubenswrapper[4767]: I0128 19:11:55.165871 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" event={"ID":"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a","Type":"ContainerDied","Data":"5f787434a8967b8b8ce0dca7d4fc92374fe190da93fa051c25162bdd13a9b2ab"} Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.637856 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.734478 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-ssh-key-openstack-edpm-ipam\") pod \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.734686 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-libvirt-combined-ca-bundle\") pod \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.734718 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-inventory\") pod \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.734831 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wrrdk\" (UniqueName: \"kubernetes.io/projected/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-kube-api-access-wrrdk\") pod \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.734952 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: 
\"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-libvirt-secret-0\") pod \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\" (UID: \"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a\") " Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.742578 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a" (UID: "b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.747507 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-kube-api-access-wrrdk" (OuterVolumeSpecName: "kube-api-access-wrrdk") pod "b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a" (UID: "b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a"). InnerVolumeSpecName "kube-api-access-wrrdk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.774043 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a" (UID: "b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.778170 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a" (UID: "b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.778414 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-inventory" (OuterVolumeSpecName: "inventory") pod "b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a" (UID: "b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.837900 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.837956 4767 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.837970 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.837979 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wrrdk\" (UniqueName: \"kubernetes.io/projected/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-kube-api-access-wrrdk\") on node \"crc\" DevicePath \"\"" Jan 28 19:11:56 crc kubenswrapper[4767]: I0128 19:11:56.837992 4767 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.189314 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" event={"ID":"b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a","Type":"ContainerDied","Data":"7f3d1caa5913b7c743d0c606ff250b541520f9a7da201d5e8a5de5264a52d98d"} Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.189386 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7f3d1caa5913b7c743d0c606ff250b541520f9a7da201d5e8a5de5264a52d98d" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.189923 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-nkz64" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.300058 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw"] Jan 28 19:11:57 crc kubenswrapper[4767]: E0128 19:11:57.301357 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="731b3090-7969-4da5-bcf3-e011f64563ee" containerName="extract-utilities" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.301388 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="731b3090-7969-4da5-bcf3-e011f64563ee" containerName="extract-utilities" Jan 28 19:11:57 crc kubenswrapper[4767]: E0128 19:11:57.301409 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="731b3090-7969-4da5-bcf3-e011f64563ee" containerName="registry-server" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.301419 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="731b3090-7969-4da5-bcf3-e011f64563ee" containerName="registry-server" Jan 28 19:11:57 crc kubenswrapper[4767]: E0128 19:11:57.301448 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="731b3090-7969-4da5-bcf3-e011f64563ee" containerName="extract-content" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.301456 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="731b3090-7969-4da5-bcf3-e011f64563ee" containerName="extract-content" Jan 28 19:11:57 crc kubenswrapper[4767]: E0128 19:11:57.301474 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.301488 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.301751 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.301781 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="731b3090-7969-4da5-bcf3-e011f64563ee" containerName="registry-server" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.302778 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.306408 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.306449 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.306876 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.307603 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.307803 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.313165 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.314951 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.320581 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw"] Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.452343 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.452398 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.452422 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.452462 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.452494 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: 
\"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.452606 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.452660 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.452725 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.452797 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w72tr\" (UniqueName: \"kubernetes.io/projected/65d614d7-80ab-4e73-a07c-ee9639e65436-kube-api-access-w72tr\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.554721 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.554784 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.554816 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.554845 4767 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.554886 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.554910 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w72tr\" (UniqueName: \"kubernetes.io/projected/65d614d7-80ab-4e73-a07c-ee9639e65436-kube-api-access-w72tr\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.555025 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.555057 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.555080 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.558008 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.561577 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.563423 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-ssh-key-openstack-edpm-ipam\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.563617 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.569043 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.569802 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.573362 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.574355 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.577670 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w72tr\" (UniqueName: \"kubernetes.io/projected/65d614d7-80ab-4e73-a07c-ee9639e65436-kube-api-access-w72tr\") pod \"nova-edpm-deployment-openstack-edpm-ipam-26dlw\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.623473 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:11:57 crc kubenswrapper[4767]: I0128 19:11:57.796898 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:11:57 crc kubenswrapper[4767]: E0128 19:11:57.797117 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:11:58 crc kubenswrapper[4767]: I0128 19:11:58.197143 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw"] Jan 28 19:11:59 crc kubenswrapper[4767]: I0128 19:11:59.211333 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" event={"ID":"65d614d7-80ab-4e73-a07c-ee9639e65436","Type":"ContainerStarted","Data":"0b5190e3ffa38aa6e0dbadd3a4a0d6b52dffc29b8ee1b4bff08e85737cf308e6"} Jan 28 19:11:59 crc kubenswrapper[4767]: I0128 19:11:59.211845 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" event={"ID":"65d614d7-80ab-4e73-a07c-ee9639e65436","Type":"ContainerStarted","Data":"dc94dbe97c6b7c06b1216e99a457dcc8198c3183d3336b023abec955bdce0529"} Jan 28 19:11:59 crc kubenswrapper[4767]: I0128 19:11:59.244292 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" podStartSLOduration=1.614012043 podStartE2EDuration="2.244260533s" podCreationTimestamp="2026-01-28 19:11:57 +0000 UTC" firstStartedPulling="2026-01-28 19:11:58.203036923 +0000 UTC m=+2524.167219797" lastFinishedPulling="2026-01-28 19:11:58.833285413 +0000 UTC m=+2524.797468287" observedRunningTime="2026-01-28 19:11:59.234720722 +0000 UTC m=+2525.198903636" watchObservedRunningTime="2026-01-28 19:11:59.244260533 +0000 UTC m=+2525.208443417" Jan 28 19:12:10 crc kubenswrapper[4767]: I0128 19:12:10.796552 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:12:10 crc kubenswrapper[4767]: E0128 19:12:10.797593 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:12:22 crc kubenswrapper[4767]: I0128 19:12:22.796925 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:12:22 crc kubenswrapper[4767]: E0128 19:12:22.798116 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" 
podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:12:37 crc kubenswrapper[4767]: I0128 19:12:37.796460 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:12:37 crc kubenswrapper[4767]: E0128 19:12:37.797302 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:12:48 crc kubenswrapper[4767]: I0128 19:12:48.796187 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:12:48 crc kubenswrapper[4767]: E0128 19:12:48.797559 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:13:02 crc kubenswrapper[4767]: I0128 19:13:02.796498 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:13:02 crc kubenswrapper[4767]: E0128 19:13:02.798647 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:13:14 crc kubenswrapper[4767]: I0128 19:13:14.804576 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:13:14 crc kubenswrapper[4767]: E0128 19:13:14.806241 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:13:28 crc kubenswrapper[4767]: I0128 19:13:28.795510 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:13:29 crc kubenswrapper[4767]: I0128 19:13:29.100672 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"9b30692c20237f6d3a61c844c8c559261899c03e8d7843270da599625da8d099"} Jan 28 19:14:18 crc kubenswrapper[4767]: I0128 19:14:18.590236 4767 generic.go:334] "Generic (PLEG): container finished" podID="65d614d7-80ab-4e73-a07c-ee9639e65436" containerID="0b5190e3ffa38aa6e0dbadd3a4a0d6b52dffc29b8ee1b4bff08e85737cf308e6" exitCode=0 Jan 28 19:14:18 crc kubenswrapper[4767]: I0128 
Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.059379 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw"
Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.129100 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-migration-ssh-key-0\") pod \"65d614d7-80ab-4e73-a07c-ee9639e65436\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") "
Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.129189 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-extra-config-0\") pod \"65d614d7-80ab-4e73-a07c-ee9639e65436\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") "
Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.129293 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-combined-ca-bundle\") pod \"65d614d7-80ab-4e73-a07c-ee9639e65436\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") "
Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.129385 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-migration-ssh-key-1\") pod \"65d614d7-80ab-4e73-a07c-ee9639e65436\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") "
Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.129415 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w72tr\" (UniqueName: \"kubernetes.io/projected/65d614d7-80ab-4e73-a07c-ee9639e65436-kube-api-access-w72tr\") pod \"65d614d7-80ab-4e73-a07c-ee9639e65436\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") "
Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.129634 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-ssh-key-openstack-edpm-ipam\") pod \"65d614d7-80ab-4e73-a07c-ee9639e65436\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") "
Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.129672 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-cell1-compute-config-1\") pod \"65d614d7-80ab-4e73-a07c-ee9639e65436\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") "
Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.129704 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-cell1-compute-config-0\") pod \"65d614d7-80ab-4e73-a07c-ee9639e65436\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") "
Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.129736 4767
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-inventory\") pod \"65d614d7-80ab-4e73-a07c-ee9639e65436\" (UID: \"65d614d7-80ab-4e73-a07c-ee9639e65436\") " Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.146581 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/65d614d7-80ab-4e73-a07c-ee9639e65436-kube-api-access-w72tr" (OuterVolumeSpecName: "kube-api-access-w72tr") pod "65d614d7-80ab-4e73-a07c-ee9639e65436" (UID: "65d614d7-80ab-4e73-a07c-ee9639e65436"). InnerVolumeSpecName "kube-api-access-w72tr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.147374 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "65d614d7-80ab-4e73-a07c-ee9639e65436" (UID: "65d614d7-80ab-4e73-a07c-ee9639e65436"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.164185 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "65d614d7-80ab-4e73-a07c-ee9639e65436" (UID: "65d614d7-80ab-4e73-a07c-ee9639e65436"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.166340 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "65d614d7-80ab-4e73-a07c-ee9639e65436" (UID: "65d614d7-80ab-4e73-a07c-ee9639e65436"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.166529 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-inventory" (OuterVolumeSpecName: "inventory") pod "65d614d7-80ab-4e73-a07c-ee9639e65436" (UID: "65d614d7-80ab-4e73-a07c-ee9639e65436"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.166995 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "65d614d7-80ab-4e73-a07c-ee9639e65436" (UID: "65d614d7-80ab-4e73-a07c-ee9639e65436"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.182188 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "65d614d7-80ab-4e73-a07c-ee9639e65436" (UID: "65d614d7-80ab-4e73-a07c-ee9639e65436"). InnerVolumeSpecName "nova-extra-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.188890 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "65d614d7-80ab-4e73-a07c-ee9639e65436" (UID: "65d614d7-80ab-4e73-a07c-ee9639e65436"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.196911 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "65d614d7-80ab-4e73-a07c-ee9639e65436" (UID: "65d614d7-80ab-4e73-a07c-ee9639e65436"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.232829 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.232886 4767 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.232896 4767 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.232907 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.232918 4767 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.232927 4767 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.232935 4767 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.232947 4767 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/65d614d7-80ab-4e73-a07c-ee9639e65436-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.232959 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w72tr\" (UniqueName: \"kubernetes.io/projected/65d614d7-80ab-4e73-a07c-ee9639e65436-kube-api-access-w72tr\") on node \"crc\" DevicePath \"\"" Jan 28 
19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.615267 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.615386 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-26dlw" event={"ID":"65d614d7-80ab-4e73-a07c-ee9639e65436","Type":"ContainerDied","Data":"dc94dbe97c6b7c06b1216e99a457dcc8198c3183d3336b023abec955bdce0529"} Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.615461 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dc94dbe97c6b7c06b1216e99a457dcc8198c3183d3336b023abec955bdce0529" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.725844 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl"] Jan 28 19:14:20 crc kubenswrapper[4767]: E0128 19:14:20.728049 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="65d614d7-80ab-4e73-a07c-ee9639e65436" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.728080 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="65d614d7-80ab-4e73-a07c-ee9639e65436" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.728353 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="65d614d7-80ab-4e73-a07c-ee9639e65436" containerName="nova-edpm-deployment-openstack-edpm-ipam" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.729229 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.734932 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.740638 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.740840 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-w58zb" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.741103 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.741120 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.746540 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl"] Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.849565 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.849794 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: 
\"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.850306 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.850663 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.850722 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.850805 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.850848 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j5s2b\" (UniqueName: \"kubernetes.io/projected/91ba9732-3ebf-4a6b-8090-3314d9ece64f-kube-api-access-j5s2b\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.952483 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.952557 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.952608 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.952632 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j5s2b\" (UniqueName: \"kubernetes.io/projected/91ba9732-3ebf-4a6b-8090-3314d9ece64f-kube-api-access-j5s2b\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.952672 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.952717 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.952818 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.959906 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.960155 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.960478 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" 
(UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.961349 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.963844 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ssh-key-openstack-edpm-ipam\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.970076 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:20 crc kubenswrapper[4767]: I0128 19:14:20.974396 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j5s2b\" (UniqueName: \"kubernetes.io/projected/91ba9732-3ebf-4a6b-8090-3314d9ece64f-kube-api-access-j5s2b\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-x6scl\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:21 crc kubenswrapper[4767]: I0128 19:14:21.056467 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:14:21 crc kubenswrapper[4767]: I0128 19:14:21.669652 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl"] Jan 28 19:14:22 crc kubenswrapper[4767]: I0128 19:14:22.648105 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" event={"ID":"91ba9732-3ebf-4a6b-8090-3314d9ece64f","Type":"ContainerStarted","Data":"f585e333eb57b76343958670c1f8fcb7f65e8a0d3d220197f763549be54d0e77"} Jan 28 19:14:22 crc kubenswrapper[4767]: I0128 19:14:22.648675 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" event={"ID":"91ba9732-3ebf-4a6b-8090-3314d9ece64f","Type":"ContainerStarted","Data":"e7c4817ffadc48c6a502a29721dc8712f8276300c35bb0c195ad24d4ee6967d9"} Jan 28 19:14:22 crc kubenswrapper[4767]: I0128 19:14:22.677681 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" podStartSLOduration=2.226811054 podStartE2EDuration="2.67765065s" podCreationTimestamp="2026-01-28 19:14:20 +0000 UTC" firstStartedPulling="2026-01-28 19:14:21.67766903 +0000 UTC m=+2667.641851924" lastFinishedPulling="2026-01-28 19:14:22.128508636 +0000 UTC m=+2668.092691520" observedRunningTime="2026-01-28 19:14:22.666970054 +0000 UTC m=+2668.631152938" watchObservedRunningTime="2026-01-28 19:14:22.67765065 +0000 UTC m=+2668.641833524" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.161763 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc"] Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.164989 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.168813 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.168889 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.178891 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc"] Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.274934 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ed1d023d-d08c-40fe-9b15-091e11384900-secret-volume\") pod \"collect-profiles-29493795-c2hbc\" (UID: \"ed1d023d-d08c-40fe-9b15-091e11384900\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.275022 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5xn76\" (UniqueName: \"kubernetes.io/projected/ed1d023d-d08c-40fe-9b15-091e11384900-kube-api-access-5xn76\") pod \"collect-profiles-29493795-c2hbc\" (UID: \"ed1d023d-d08c-40fe-9b15-091e11384900\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.275058 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ed1d023d-d08c-40fe-9b15-091e11384900-config-volume\") pod \"collect-profiles-29493795-c2hbc\" (UID: \"ed1d023d-d08c-40fe-9b15-091e11384900\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.378045 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ed1d023d-d08c-40fe-9b15-091e11384900-secret-volume\") pod \"collect-profiles-29493795-c2hbc\" (UID: \"ed1d023d-d08c-40fe-9b15-091e11384900\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.378160 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5xn76\" (UniqueName: \"kubernetes.io/projected/ed1d023d-d08c-40fe-9b15-091e11384900-kube-api-access-5xn76\") pod \"collect-profiles-29493795-c2hbc\" (UID: \"ed1d023d-d08c-40fe-9b15-091e11384900\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.378197 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ed1d023d-d08c-40fe-9b15-091e11384900-config-volume\") pod \"collect-profiles-29493795-c2hbc\" (UID: \"ed1d023d-d08c-40fe-9b15-091e11384900\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.379512 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ed1d023d-d08c-40fe-9b15-091e11384900-config-volume\") pod 
\"collect-profiles-29493795-c2hbc\" (UID: \"ed1d023d-d08c-40fe-9b15-091e11384900\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.388242 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ed1d023d-d08c-40fe-9b15-091e11384900-secret-volume\") pod \"collect-profiles-29493795-c2hbc\" (UID: \"ed1d023d-d08c-40fe-9b15-091e11384900\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.412465 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5xn76\" (UniqueName: \"kubernetes.io/projected/ed1d023d-d08c-40fe-9b15-091e11384900-kube-api-access-5xn76\") pod \"collect-profiles-29493795-c2hbc\" (UID: \"ed1d023d-d08c-40fe-9b15-091e11384900\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:00 crc kubenswrapper[4767]: I0128 19:15:00.489079 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:01 crc kubenswrapper[4767]: I0128 19:15:01.040538 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc"] Jan 28 19:15:01 crc kubenswrapper[4767]: W0128 19:15:01.042319 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded1d023d_d08c_40fe_9b15_091e11384900.slice/crio-109a82fef5465fa44886c048dd1e277975d8390dfcf347062a7ad305778b46df WatchSource:0}: Error finding container 109a82fef5465fa44886c048dd1e277975d8390dfcf347062a7ad305778b46df: Status 404 returned error can't find the container with id 109a82fef5465fa44886c048dd1e277975d8390dfcf347062a7ad305778b46df Jan 28 19:15:02 crc kubenswrapper[4767]: I0128 19:15:02.064658 4767 generic.go:334] "Generic (PLEG): container finished" podID="ed1d023d-d08c-40fe-9b15-091e11384900" containerID="4908ae670ca306210110991e2f401a0c6b19244060c705e945d84cc2501cc947" exitCode=0 Jan 28 19:15:02 crc kubenswrapper[4767]: I0128 19:15:02.064858 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" event={"ID":"ed1d023d-d08c-40fe-9b15-091e11384900","Type":"ContainerDied","Data":"4908ae670ca306210110991e2f401a0c6b19244060c705e945d84cc2501cc947"} Jan 28 19:15:02 crc kubenswrapper[4767]: I0128 19:15:02.065658 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" event={"ID":"ed1d023d-d08c-40fe-9b15-091e11384900","Type":"ContainerStarted","Data":"109a82fef5465fa44886c048dd1e277975d8390dfcf347062a7ad305778b46df"} Jan 28 19:15:03 crc kubenswrapper[4767]: I0128 19:15:03.467398 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:03 crc kubenswrapper[4767]: I0128 19:15:03.657420 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ed1d023d-d08c-40fe-9b15-091e11384900-config-volume\") pod \"ed1d023d-d08c-40fe-9b15-091e11384900\" (UID: \"ed1d023d-d08c-40fe-9b15-091e11384900\") " Jan 28 19:15:03 crc kubenswrapper[4767]: I0128 19:15:03.657465 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ed1d023d-d08c-40fe-9b15-091e11384900-secret-volume\") pod \"ed1d023d-d08c-40fe-9b15-091e11384900\" (UID: \"ed1d023d-d08c-40fe-9b15-091e11384900\") " Jan 28 19:15:03 crc kubenswrapper[4767]: I0128 19:15:03.657798 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5xn76\" (UniqueName: \"kubernetes.io/projected/ed1d023d-d08c-40fe-9b15-091e11384900-kube-api-access-5xn76\") pod \"ed1d023d-d08c-40fe-9b15-091e11384900\" (UID: \"ed1d023d-d08c-40fe-9b15-091e11384900\") " Jan 28 19:15:03 crc kubenswrapper[4767]: I0128 19:15:03.658475 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ed1d023d-d08c-40fe-9b15-091e11384900-config-volume" (OuterVolumeSpecName: "config-volume") pod "ed1d023d-d08c-40fe-9b15-091e11384900" (UID: "ed1d023d-d08c-40fe-9b15-091e11384900"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 19:15:03 crc kubenswrapper[4767]: I0128 19:15:03.669557 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ed1d023d-d08c-40fe-9b15-091e11384900-kube-api-access-5xn76" (OuterVolumeSpecName: "kube-api-access-5xn76") pod "ed1d023d-d08c-40fe-9b15-091e11384900" (UID: "ed1d023d-d08c-40fe-9b15-091e11384900"). InnerVolumeSpecName "kube-api-access-5xn76". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:15:03 crc kubenswrapper[4767]: I0128 19:15:03.670384 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ed1d023d-d08c-40fe-9b15-091e11384900-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ed1d023d-d08c-40fe-9b15-091e11384900" (UID: "ed1d023d-d08c-40fe-9b15-091e11384900"). InnerVolumeSpecName "secret-volume". 
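
The entries in this window all share the klog header layout: a severity letter fused with the month and day (I0128, E0128, W0128), a microsecond wall-clock timestamp, the kubelet PID (4767 throughout), the emitting source file and line, and then the message. A minimal Go sketch of a parser for that header, assuming the single-line entry shape seen in this log (the regular expression is inferred from these lines, not taken from klog itself):

    package main

    import (
    	"fmt"
    	"regexp"
    )

    // Matches headers such as:
    //   I0128 19:15:03.657420 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started ..."
    // Pattern inferred from the entries above; klog's real format has more variants.
    var klogHeader = regexp.MustCompile(`^([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d{6})\s+(\d+) ([\w.-]+:\d+)\] (.*)$`)

    func main() {
    	line := `I0128 19:15:03.657420 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started"`
    	if m := klogHeader.FindStringSubmatch(line); m != nil {
    		// m[1]=severity m[2]=MMDD m[3]=time m[4]=pid m[5]=file:line m[6]=message
    		fmt.Printf("sev=%s date=%s time=%s pid=%s src=%s msg=%s\n", m[1], m[2], m[3], m[4], m[5], m[6])
    	}
    }
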
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:15:03 crc kubenswrapper[4767]: I0128 19:15:03.760837 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5xn76\" (UniqueName: \"kubernetes.io/projected/ed1d023d-d08c-40fe-9b15-091e11384900-kube-api-access-5xn76\") on node \"crc\" DevicePath \"\"" Jan 28 19:15:03 crc kubenswrapper[4767]: I0128 19:15:03.760910 4767 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ed1d023d-d08c-40fe-9b15-091e11384900-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 19:15:03 crc kubenswrapper[4767]: I0128 19:15:03.760927 4767 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ed1d023d-d08c-40fe-9b15-091e11384900-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 19:15:04 crc kubenswrapper[4767]: I0128 19:15:04.087386 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" event={"ID":"ed1d023d-d08c-40fe-9b15-091e11384900","Type":"ContainerDied","Data":"109a82fef5465fa44886c048dd1e277975d8390dfcf347062a7ad305778b46df"} Jan 28 19:15:04 crc kubenswrapper[4767]: I0128 19:15:04.087868 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="109a82fef5465fa44886c048dd1e277975d8390dfcf347062a7ad305778b46df" Jan 28 19:15:04 crc kubenswrapper[4767]: I0128 19:15:04.087472 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493795-c2hbc" Jan 28 19:15:04 crc kubenswrapper[4767]: I0128 19:15:04.594141 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m"] Jan 28 19:15:04 crc kubenswrapper[4767]: I0128 19:15:04.605122 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493750-rcm4m"] Jan 28 19:15:04 crc kubenswrapper[4767]: I0128 19:15:04.811041 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bcde5fe1-42de-449a-9743-2f313a33659a" path="/var/lib/kubelet/pods/bcde5fe1-42de-449a-9743-2f313a33659a/volumes" Jan 28 19:15:45 crc kubenswrapper[4767]: I0128 19:15:45.455645 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:15:45 crc kubenswrapper[4767]: I0128 19:15:45.456706 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:15:52 crc kubenswrapper[4767]: I0128 19:15:52.993530 4767 scope.go:117] "RemoveContainer" containerID="687db6835b47ef59bf0a8c5a29178c8469635231464e234ae28e76c219f66f33" Jan 28 19:16:15 crc kubenswrapper[4767]: I0128 19:16:15.456022 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Jan 28 19:16:15 crc kubenswrapper[4767]: I0128 19:16:15.458396 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:16:44 crc kubenswrapper[4767]: I0128 19:16:44.084952 4767 generic.go:334] "Generic (PLEG): container finished" podID="91ba9732-3ebf-4a6b-8090-3314d9ece64f" containerID="f585e333eb57b76343958670c1f8fcb7f65e8a0d3d220197f763549be54d0e77" exitCode=0 Jan 28 19:16:44 crc kubenswrapper[4767]: I0128 19:16:44.085812 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" event={"ID":"91ba9732-3ebf-4a6b-8090-3314d9ece64f","Type":"ContainerDied","Data":"f585e333eb57b76343958670c1f8fcb7f65e8a0d3d220197f763549be54d0e77"} Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.459087 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.459787 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.459851 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.461026 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9b30692c20237f6d3a61c844c8c559261899c03e8d7843270da599625da8d099"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.461097 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://9b30692c20237f6d3a61c844c8c559261899c03e8d7843270da599625da8d099" gracePeriod=600 Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.558286 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.685353 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-telemetry-combined-ca-bundle\") pod \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.685789 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j5s2b\" (UniqueName: \"kubernetes.io/projected/91ba9732-3ebf-4a6b-8090-3314d9ece64f-kube-api-access-j5s2b\") pod \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.685886 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-inventory\") pod \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.685969 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-0\") pod \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.686133 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-1\") pod \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.686314 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ssh-key-openstack-edpm-ipam\") pod \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.686390 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-2\") pod \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\" (UID: \"91ba9732-3ebf-4a6b-8090-3314d9ece64f\") " Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.694755 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "91ba9732-3ebf-4a6b-8090-3314d9ece64f" (UID: "91ba9732-3ebf-4a6b-8090-3314d9ece64f"). InnerVolumeSpecName "telemetry-combined-ca-bundle". 
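
The machine-config-daemon liveness failures logged above land exactly 30 seconds apart (19:15:45, 19:16:15, 19:16:45), and only after the third one does the kubelet log "failed liveness probe, will be restarted" and kill the container with the pod's 600-second grace period. That timing is consistent with a probe configured with periodSeconds=30 and failureThreshold=3, though the pod spec itself is not in this log. A minimal Go sketch of that consecutive-failure policy, with the URL taken from the probe output and the period and threshold as the assumed values:

    package main

    import (
    	"fmt"
    	"net/http"
    	"time"
    )

    func main() {
    	const (
    		url              = "http://127.0.0.1:8798/health" // endpoint from the probe output above
    		period           = 30 * time.Second               // assumed periodSeconds
    		failureThreshold = 3                              // assumed failureThreshold
    	)
    	client := &http.Client{Timeout: time.Second}
    	failures := 0
    	for range time.Tick(period) {
    		resp, err := client.Get(url) // "connection refused" surfaces here as err != nil
    		if err == nil {
    			resp.Body.Close()
    			if resp.StatusCode == http.StatusOK {
    				failures = 0
    				continue
    			}
    		}
    		failures++
    		fmt.Printf("probe failed (%d/%d)\n", failures, failureThreshold)
    		if failures >= failureThreshold {
    			fmt.Println("restart container") // the kubelet kills it with the pod's grace period (600s here)
    			failures = 0
    		}
    	}
    }
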
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.694971 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91ba9732-3ebf-4a6b-8090-3314d9ece64f-kube-api-access-j5s2b" (OuterVolumeSpecName: "kube-api-access-j5s2b") pod "91ba9732-3ebf-4a6b-8090-3314d9ece64f" (UID: "91ba9732-3ebf-4a6b-8090-3314d9ece64f"). InnerVolumeSpecName "kube-api-access-j5s2b". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.722400 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "91ba9732-3ebf-4a6b-8090-3314d9ece64f" (UID: "91ba9732-3ebf-4a6b-8090-3314d9ece64f"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.723559 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "91ba9732-3ebf-4a6b-8090-3314d9ece64f" (UID: "91ba9732-3ebf-4a6b-8090-3314d9ece64f"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.724157 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "91ba9732-3ebf-4a6b-8090-3314d9ece64f" (UID: "91ba9732-3ebf-4a6b-8090-3314d9ece64f"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.726810 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "91ba9732-3ebf-4a6b-8090-3314d9ece64f" (UID: "91ba9732-3ebf-4a6b-8090-3314d9ece64f"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.732148 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-inventory" (OuterVolumeSpecName: "inventory") pod "91ba9732-3ebf-4a6b-8090-3314d9ece64f" (UID: "91ba9732-3ebf-4a6b-8090-3314d9ece64f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.789583 4767 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.789969 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j5s2b\" (UniqueName: \"kubernetes.io/projected/91ba9732-3ebf-4a6b-8090-3314d9ece64f-kube-api-access-j5s2b\") on node \"crc\" DevicePath \"\"" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.790061 4767 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-inventory\") on node \"crc\" DevicePath \"\"" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.790136 4767 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.790249 4767 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.790391 4767 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.790548 4767 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/91ba9732-3ebf-4a6b-8090-3314d9ece64f-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.962897 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2fq5b"] Jan 28 19:16:45 crc kubenswrapper[4767]: E0128 19:16:45.964228 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ed1d023d-d08c-40fe-9b15-091e11384900" containerName="collect-profiles" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.964256 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="ed1d023d-d08c-40fe-9b15-091e11384900" containerName="collect-profiles" Jan 28 19:16:45 crc kubenswrapper[4767]: E0128 19:16:45.964293 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="91ba9732-3ebf-4a6b-8090-3314d9ece64f" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.964306 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="91ba9732-3ebf-4a6b-8090-3314d9ece64f" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.964606 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="91ba9732-3ebf-4a6b-8090-3314d9ece64f" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.964638 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="ed1d023d-d08c-40fe-9b15-091e11384900" containerName="collect-profiles" Jan 28 19:16:45 
crc kubenswrapper[4767]: I0128 19:16:45.966628 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:45 crc kubenswrapper[4767]: I0128 19:16:45.977008 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2fq5b"] Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.098366 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-catalog-content\") pod \"redhat-operators-2fq5b\" (UID: \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\") " pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.098575 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-utilities\") pod \"redhat-operators-2fq5b\" (UID: \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\") " pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.098758 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mw7g8\" (UniqueName: \"kubernetes.io/projected/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-kube-api-access-mw7g8\") pod \"redhat-operators-2fq5b\" (UID: \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\") " pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.115449 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="9b30692c20237f6d3a61c844c8c559261899c03e8d7843270da599625da8d099" exitCode=0 Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.115778 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"9b30692c20237f6d3a61c844c8c559261899c03e8d7843270da599625da8d099"} Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.115878 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e"} Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.115962 4767 scope.go:117] "RemoveContainer" containerID="6ecc2d08155e17712335e39edfbd13ac7f360621395547d7990b79aa60c7e94e" Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.120856 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" event={"ID":"91ba9732-3ebf-4a6b-8090-3314d9ece64f","Type":"ContainerDied","Data":"e7c4817ffadc48c6a502a29721dc8712f8276300c35bb0c195ad24d4ee6967d9"} Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.120928 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e7c4817ffadc48c6a502a29721dc8712f8276300c35bb0c195ad24d4ee6967d9" Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.121016 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-x6scl" Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.201714 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mw7g8\" (UniqueName: \"kubernetes.io/projected/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-kube-api-access-mw7g8\") pod \"redhat-operators-2fq5b\" (UID: \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\") " pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.201900 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-catalog-content\") pod \"redhat-operators-2fq5b\" (UID: \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\") " pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.201968 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-utilities\") pod \"redhat-operators-2fq5b\" (UID: \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\") " pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.202724 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-utilities\") pod \"redhat-operators-2fq5b\" (UID: \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\") " pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.202862 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-catalog-content\") pod \"redhat-operators-2fq5b\" (UID: \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\") " pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.231616 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mw7g8\" (UniqueName: \"kubernetes.io/projected/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-kube-api-access-mw7g8\") pod \"redhat-operators-2fq5b\" (UID: \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\") " pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.288165 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:46 crc kubenswrapper[4767]: W0128 19:16:46.822654 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda48fa804_21d7_4eb9_9fd2_df3ec7008f01.slice/crio-36011e333c70b67047abe00166b1ba6b3875a8b8476bdff159a708bd952d49d3 WatchSource:0}: Error finding container 36011e333c70b67047abe00166b1ba6b3875a8b8476bdff159a708bd952d49d3: Status 404 returned error can't find the container with id 36011e333c70b67047abe00166b1ba6b3875a8b8476bdff159a708bd952d49d3 Jan 28 19:16:46 crc kubenswrapper[4767]: I0128 19:16:46.833316 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2fq5b"] Jan 28 19:16:47 crc kubenswrapper[4767]: I0128 19:16:47.133085 4767 generic.go:334] "Generic (PLEG): container finished" podID="a48fa804-21d7-4eb9-9fd2-df3ec7008f01" containerID="63d360149f9d8537f62d72f95423e4f3cbf6f09936dd4d14fa9cee558276390b" exitCode=0 Jan 28 19:16:47 crc kubenswrapper[4767]: I0128 19:16:47.133181 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2fq5b" event={"ID":"a48fa804-21d7-4eb9-9fd2-df3ec7008f01","Type":"ContainerDied","Data":"63d360149f9d8537f62d72f95423e4f3cbf6f09936dd4d14fa9cee558276390b"} Jan 28 19:16:47 crc kubenswrapper[4767]: I0128 19:16:47.134017 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2fq5b" event={"ID":"a48fa804-21d7-4eb9-9fd2-df3ec7008f01","Type":"ContainerStarted","Data":"36011e333c70b67047abe00166b1ba6b3875a8b8476bdff159a708bd952d49d3"} Jan 28 19:16:47 crc kubenswrapper[4767]: I0128 19:16:47.135397 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 19:16:48 crc kubenswrapper[4767]: I0128 19:16:48.152793 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2fq5b" event={"ID":"a48fa804-21d7-4eb9-9fd2-df3ec7008f01","Type":"ContainerStarted","Data":"06045afd81472467aec228d151a1e1977ea67bb62c20156f57a032a27178ff8e"} Jan 28 19:16:49 crc kubenswrapper[4767]: I0128 19:16:49.164065 4767 generic.go:334] "Generic (PLEG): container finished" podID="a48fa804-21d7-4eb9-9fd2-df3ec7008f01" containerID="06045afd81472467aec228d151a1e1977ea67bb62c20156f57a032a27178ff8e" exitCode=0 Jan 28 19:16:49 crc kubenswrapper[4767]: I0128 19:16:49.164124 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2fq5b" event={"ID":"a48fa804-21d7-4eb9-9fd2-df3ec7008f01","Type":"ContainerDied","Data":"06045afd81472467aec228d151a1e1977ea67bb62c20156f57a032a27178ff8e"} Jan 28 19:16:51 crc kubenswrapper[4767]: I0128 19:16:51.185677 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2fq5b" event={"ID":"a48fa804-21d7-4eb9-9fd2-df3ec7008f01","Type":"ContainerStarted","Data":"34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350"} Jan 28 19:16:51 crc kubenswrapper[4767]: I0128 19:16:51.215904 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2fq5b" podStartSLOduration=3.773635525 podStartE2EDuration="6.215880521s" podCreationTimestamp="2026-01-28 19:16:45 +0000 UTC" firstStartedPulling="2026-01-28 19:16:47.135075863 +0000 UTC m=+2813.099258737" lastFinishedPulling="2026-01-28 19:16:49.577320859 +0000 UTC m=+2815.541503733" 
observedRunningTime="2026-01-28 19:16:51.207167776 +0000 UTC m=+2817.171350670" watchObservedRunningTime="2026-01-28 19:16:51.215880521 +0000 UTC m=+2817.180063395" Jan 28 19:16:56 crc kubenswrapper[4767]: I0128 19:16:56.288260 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:56 crc kubenswrapper[4767]: I0128 19:16:56.289183 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:56 crc kubenswrapper[4767]: I0128 19:16:56.346491 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:57 crc kubenswrapper[4767]: I0128 19:16:57.289004 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:57 crc kubenswrapper[4767]: I0128 19:16:57.349924 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2fq5b"] Jan 28 19:16:59 crc kubenswrapper[4767]: I0128 19:16:59.262061 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2fq5b" podUID="a48fa804-21d7-4eb9-9fd2-df3ec7008f01" containerName="registry-server" containerID="cri-o://34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350" gracePeriod=2 Jan 28 19:16:59 crc kubenswrapper[4767]: I0128 19:16:59.745479 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:16:59 crc kubenswrapper[4767]: I0128 19:16:59.885631 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mw7g8\" (UniqueName: \"kubernetes.io/projected/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-kube-api-access-mw7g8\") pod \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\" (UID: \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\") " Jan 28 19:16:59 crc kubenswrapper[4767]: I0128 19:16:59.885728 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-catalog-content\") pod \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\" (UID: \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\") " Jan 28 19:16:59 crc kubenswrapper[4767]: I0128 19:16:59.886107 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-utilities\") pod \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\" (UID: \"a48fa804-21d7-4eb9-9fd2-df3ec7008f01\") " Jan 28 19:16:59 crc kubenswrapper[4767]: I0128 19:16:59.886947 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-utilities" (OuterVolumeSpecName: "utilities") pod "a48fa804-21d7-4eb9-9fd2-df3ec7008f01" (UID: "a48fa804-21d7-4eb9-9fd2-df3ec7008f01"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:16:59 crc kubenswrapper[4767]: I0128 19:16:59.893526 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-kube-api-access-mw7g8" (OuterVolumeSpecName: "kube-api-access-mw7g8") pod "a48fa804-21d7-4eb9-9fd2-df3ec7008f01" (UID: "a48fa804-21d7-4eb9-9fd2-df3ec7008f01"). InnerVolumeSpecName "kube-api-access-mw7g8". 
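
The pod_startup_latency_tracker entries in this log (19:14:22, 19:16:51, and 19:18:25 below) all satisfy the same relationship: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that figure minus the image-pull window, lastFinishedPulling minus firstStartedPulling, computed on the monotonic offsets (the m=+... values). For redhat-operators-2fq5b: 6.215880521s - (2815.541503733 - 2813.099258737)s = 3.773635525s, exactly as logged. A short Go check of the same arithmetic for the 19:14:22 telemetry entry:

    package main

    import "fmt"

    func main() {
    	// Monotonic offsets and E2E duration from the 19:14:22 tracker entry above.
    	const (
    		firstStartedPulling = 2667.641851924 // m=+ offset, in seconds
    		lastFinishedPulling = 2668.092691520
    		podStartE2E         = 2.67765065
    	)
    	pull := lastFinishedPulling - firstStartedPulling
    	slo := podStartE2E - pull
    	fmt.Printf("image pull %.9fs, podStartSLOduration %.9fs\n", pull, slo)
    	// Prints 0.450839596s and 2.226811054s, matching the logged podStartSLOduration.
    }
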
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:16:59 crc kubenswrapper[4767]: I0128 19:16:59.990082 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:16:59 crc kubenswrapper[4767]: I0128 19:16:59.990153 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mw7g8\" (UniqueName: \"kubernetes.io/projected/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-kube-api-access-mw7g8\") on node \"crc\" DevicePath \"\"" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.017139 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a48fa804-21d7-4eb9-9fd2-df3ec7008f01" (UID: "a48fa804-21d7-4eb9-9fd2-df3ec7008f01"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.092560 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a48fa804-21d7-4eb9-9fd2-df3ec7008f01-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.274476 4767 generic.go:334] "Generic (PLEG): container finished" podID="a48fa804-21d7-4eb9-9fd2-df3ec7008f01" containerID="34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350" exitCode=0 Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.274539 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2fq5b" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.274542 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2fq5b" event={"ID":"a48fa804-21d7-4eb9-9fd2-df3ec7008f01","Type":"ContainerDied","Data":"34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350"} Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.274586 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2fq5b" event={"ID":"a48fa804-21d7-4eb9-9fd2-df3ec7008f01","Type":"ContainerDied","Data":"36011e333c70b67047abe00166b1ba6b3875a8b8476bdff159a708bd952d49d3"} Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.274608 4767 scope.go:117] "RemoveContainer" containerID="34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.307357 4767 scope.go:117] "RemoveContainer" containerID="06045afd81472467aec228d151a1e1977ea67bb62c20156f57a032a27178ff8e" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.324136 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2fq5b"] Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.338474 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2fq5b"] Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.339764 4767 scope.go:117] "RemoveContainer" containerID="63d360149f9d8537f62d72f95423e4f3cbf6f09936dd4d14fa9cee558276390b" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.383688 4767 scope.go:117] "RemoveContainer" containerID="34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350" Jan 28 19:17:00 crc kubenswrapper[4767]: E0128 19:17:00.385287 4767 log.go:32] "ContainerStatus 
from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350\": container with ID starting with 34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350 not found: ID does not exist" containerID="34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.385363 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350"} err="failed to get container status \"34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350\": rpc error: code = NotFound desc = could not find container \"34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350\": container with ID starting with 34cefa389f4a6a27386e0109a67f30d355b29127cce82ef55557d4bcb38e4350 not found: ID does not exist" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.385408 4767 scope.go:117] "RemoveContainer" containerID="06045afd81472467aec228d151a1e1977ea67bb62c20156f57a032a27178ff8e" Jan 28 19:17:00 crc kubenswrapper[4767]: E0128 19:17:00.385964 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06045afd81472467aec228d151a1e1977ea67bb62c20156f57a032a27178ff8e\": container with ID starting with 06045afd81472467aec228d151a1e1977ea67bb62c20156f57a032a27178ff8e not found: ID does not exist" containerID="06045afd81472467aec228d151a1e1977ea67bb62c20156f57a032a27178ff8e" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.386001 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06045afd81472467aec228d151a1e1977ea67bb62c20156f57a032a27178ff8e"} err="failed to get container status \"06045afd81472467aec228d151a1e1977ea67bb62c20156f57a032a27178ff8e\": rpc error: code = NotFound desc = could not find container \"06045afd81472467aec228d151a1e1977ea67bb62c20156f57a032a27178ff8e\": container with ID starting with 06045afd81472467aec228d151a1e1977ea67bb62c20156f57a032a27178ff8e not found: ID does not exist" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.386037 4767 scope.go:117] "RemoveContainer" containerID="63d360149f9d8537f62d72f95423e4f3cbf6f09936dd4d14fa9cee558276390b" Jan 28 19:17:00 crc kubenswrapper[4767]: E0128 19:17:00.386827 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63d360149f9d8537f62d72f95423e4f3cbf6f09936dd4d14fa9cee558276390b\": container with ID starting with 63d360149f9d8537f62d72f95423e4f3cbf6f09936dd4d14fa9cee558276390b not found: ID does not exist" containerID="63d360149f9d8537f62d72f95423e4f3cbf6f09936dd4d14fa9cee558276390b" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.386901 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63d360149f9d8537f62d72f95423e4f3cbf6f09936dd4d14fa9cee558276390b"} err="failed to get container status \"63d360149f9d8537f62d72f95423e4f3cbf6f09936dd4d14fa9cee558276390b\": rpc error: code = NotFound desc = could not find container \"63d360149f9d8537f62d72f95423e4f3cbf6f09936dd4d14fa9cee558276390b\": container with ID starting with 63d360149f9d8537f62d72f95423e4f3cbf6f09936dd4d14fa9cee558276390b not found: ID does not exist" Jan 28 19:17:00 crc kubenswrapper[4767]: I0128 19:17:00.809118 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="a48fa804-21d7-4eb9-9fd2-df3ec7008f01" path="/var/lib/kubelet/pods/a48fa804-21d7-4eb9-9fd2-df3ec7008f01/volumes" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.346420 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b5zfm"] Jan 28 19:18:19 crc kubenswrapper[4767]: E0128 19:18:19.347511 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a48fa804-21d7-4eb9-9fd2-df3ec7008f01" containerName="extract-content" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.347531 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a48fa804-21d7-4eb9-9fd2-df3ec7008f01" containerName="extract-content" Jan 28 19:18:19 crc kubenswrapper[4767]: E0128 19:18:19.347549 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a48fa804-21d7-4eb9-9fd2-df3ec7008f01" containerName="registry-server" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.347560 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a48fa804-21d7-4eb9-9fd2-df3ec7008f01" containerName="registry-server" Jan 28 19:18:19 crc kubenswrapper[4767]: E0128 19:18:19.347577 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a48fa804-21d7-4eb9-9fd2-df3ec7008f01" containerName="extract-utilities" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.347584 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="a48fa804-21d7-4eb9-9fd2-df3ec7008f01" containerName="extract-utilities" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.347896 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="a48fa804-21d7-4eb9-9fd2-df3ec7008f01" containerName="registry-server" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.349825 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.362134 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b5zfm"] Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.502017 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8n6q\" (UniqueName: \"kubernetes.io/projected/64a0df59-e7e7-4c4c-b497-8fcb259284d7-kube-api-access-g8n6q\") pod \"community-operators-b5zfm\" (UID: \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\") " pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.502111 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64a0df59-e7e7-4c4c-b497-8fcb259284d7-catalog-content\") pod \"community-operators-b5zfm\" (UID: \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\") " pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.502153 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64a0df59-e7e7-4c4c-b497-8fcb259284d7-utilities\") pod \"community-operators-b5zfm\" (UID: \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\") " pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.604681 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8n6q\" (UniqueName: \"kubernetes.io/projected/64a0df59-e7e7-4c4c-b497-8fcb259284d7-kube-api-access-g8n6q\") pod \"community-operators-b5zfm\" (UID: \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\") " pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.604767 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64a0df59-e7e7-4c4c-b497-8fcb259284d7-catalog-content\") pod \"community-operators-b5zfm\" (UID: \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\") " pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.604808 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64a0df59-e7e7-4c4c-b497-8fcb259284d7-utilities\") pod \"community-operators-b5zfm\" (UID: \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\") " pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.605457 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64a0df59-e7e7-4c4c-b497-8fcb259284d7-utilities\") pod \"community-operators-b5zfm\" (UID: \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\") " pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.605765 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64a0df59-e7e7-4c4c-b497-8fcb259284d7-catalog-content\") pod \"community-operators-b5zfm\" (UID: \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\") " pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.629254 4767 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-g8n6q\" (UniqueName: \"kubernetes.io/projected/64a0df59-e7e7-4c4c-b497-8fcb259284d7-kube-api-access-g8n6q\") pod \"community-operators-b5zfm\" (UID: \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\") " pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:19 crc kubenswrapper[4767]: I0128 19:18:19.679680 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:20 crc kubenswrapper[4767]: I0128 19:18:20.271625 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b5zfm"] Jan 28 19:18:21 crc kubenswrapper[4767]: I0128 19:18:21.076002 4767 generic.go:334] "Generic (PLEG): container finished" podID="64a0df59-e7e7-4c4c-b497-8fcb259284d7" containerID="18b08b8f557b1ceb91b1bfc7b75f346403ce6ca04e111279ba85f0a40f24665e" exitCode=0 Jan 28 19:18:21 crc kubenswrapper[4767]: I0128 19:18:21.076133 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b5zfm" event={"ID":"64a0df59-e7e7-4c4c-b497-8fcb259284d7","Type":"ContainerDied","Data":"18b08b8f557b1ceb91b1bfc7b75f346403ce6ca04e111279ba85f0a40f24665e"} Jan 28 19:18:21 crc kubenswrapper[4767]: I0128 19:18:21.077046 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b5zfm" event={"ID":"64a0df59-e7e7-4c4c-b497-8fcb259284d7","Type":"ContainerStarted","Data":"3221e571745246ab800750f507e8a8aa48627d504ef203e06156b6a07af8b4cb"} Jan 28 19:18:23 crc kubenswrapper[4767]: I0128 19:18:23.105108 4767 generic.go:334] "Generic (PLEG): container finished" podID="64a0df59-e7e7-4c4c-b497-8fcb259284d7" containerID="a61b6d6c512b0c56a05d05f1386b061d0d70d469b15b9f03d266e166c3260f10" exitCode=0 Jan 28 19:18:23 crc kubenswrapper[4767]: I0128 19:18:23.105345 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b5zfm" event={"ID":"64a0df59-e7e7-4c4c-b497-8fcb259284d7","Type":"ContainerDied","Data":"a61b6d6c512b0c56a05d05f1386b061d0d70d469b15b9f03d266e166c3260f10"} Jan 28 19:18:25 crc kubenswrapper[4767]: I0128 19:18:25.131757 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b5zfm" event={"ID":"64a0df59-e7e7-4c4c-b497-8fcb259284d7","Type":"ContainerStarted","Data":"bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31"} Jan 28 19:18:25 crc kubenswrapper[4767]: I0128 19:18:25.168094 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b5zfm" podStartSLOduration=3.4236871779999998 podStartE2EDuration="6.168069085s" podCreationTimestamp="2026-01-28 19:18:19 +0000 UTC" firstStartedPulling="2026-01-28 19:18:21.078593223 +0000 UTC m=+2907.042776097" lastFinishedPulling="2026-01-28 19:18:23.82297513 +0000 UTC m=+2909.787158004" observedRunningTime="2026-01-28 19:18:25.157011417 +0000 UTC m=+2911.121194291" watchObservedRunningTime="2026-01-28 19:18:25.168069085 +0000 UTC m=+2911.132251969" Jan 28 19:18:29 crc kubenswrapper[4767]: I0128 19:18:29.680391 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:29 crc kubenswrapper[4767]: I0128 19:18:29.680722 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:29 crc kubenswrapper[4767]: I0128 
19:18:29.739083 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:30 crc kubenswrapper[4767]: I0128 19:18:30.257332 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:30 crc kubenswrapper[4767]: I0128 19:18:30.312225 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b5zfm"] Jan 28 19:18:32 crc kubenswrapper[4767]: I0128 19:18:32.218861 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b5zfm" podUID="64a0df59-e7e7-4c4c-b497-8fcb259284d7" containerName="registry-server" containerID="cri-o://bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31" gracePeriod=2 Jan 28 19:18:32 crc kubenswrapper[4767]: I0128 19:18:32.688326 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:32 crc kubenswrapper[4767]: I0128 19:18:32.811062 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64a0df59-e7e7-4c4c-b497-8fcb259284d7-catalog-content\") pod \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\" (UID: \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\") " Jan 28 19:18:32 crc kubenswrapper[4767]: I0128 19:18:32.811580 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8n6q\" (UniqueName: \"kubernetes.io/projected/64a0df59-e7e7-4c4c-b497-8fcb259284d7-kube-api-access-g8n6q\") pod \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\" (UID: \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\") " Jan 28 19:18:32 crc kubenswrapper[4767]: I0128 19:18:32.812292 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64a0df59-e7e7-4c4c-b497-8fcb259284d7-utilities\") pod \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\" (UID: \"64a0df59-e7e7-4c4c-b497-8fcb259284d7\") " Jan 28 19:18:32 crc kubenswrapper[4767]: I0128 19:18:32.813171 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64a0df59-e7e7-4c4c-b497-8fcb259284d7-utilities" (OuterVolumeSpecName: "utilities") pod "64a0df59-e7e7-4c4c-b497-8fcb259284d7" (UID: "64a0df59-e7e7-4c4c-b497-8fcb259284d7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:18:32 crc kubenswrapper[4767]: I0128 19:18:32.824494 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/64a0df59-e7e7-4c4c-b497-8fcb259284d7-kube-api-access-g8n6q" (OuterVolumeSpecName: "kube-api-access-g8n6q") pod "64a0df59-e7e7-4c4c-b497-8fcb259284d7" (UID: "64a0df59-e7e7-4c4c-b497-8fcb259284d7"). InnerVolumeSpecName "kube-api-access-g8n6q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:18:32 crc kubenswrapper[4767]: I0128 19:18:32.915537 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8n6q\" (UniqueName: \"kubernetes.io/projected/64a0df59-e7e7-4c4c-b497-8fcb259284d7-kube-api-access-g8n6q\") on node \"crc\" DevicePath \"\"" Jan 28 19:18:32 crc kubenswrapper[4767]: I0128 19:18:32.915580 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/64a0df59-e7e7-4c4c-b497-8fcb259284d7-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.142031 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/64a0df59-e7e7-4c4c-b497-8fcb259284d7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "64a0df59-e7e7-4c4c-b497-8fcb259284d7" (UID: "64a0df59-e7e7-4c4c-b497-8fcb259284d7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.223172 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/64a0df59-e7e7-4c4c-b497-8fcb259284d7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.231433 4767 generic.go:334] "Generic (PLEG): container finished" podID="64a0df59-e7e7-4c4c-b497-8fcb259284d7" containerID="bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31" exitCode=0 Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.231497 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b5zfm" event={"ID":"64a0df59-e7e7-4c4c-b497-8fcb259284d7","Type":"ContainerDied","Data":"bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31"} Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.231548 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b5zfm" event={"ID":"64a0df59-e7e7-4c4c-b497-8fcb259284d7","Type":"ContainerDied","Data":"3221e571745246ab800750f507e8a8aa48627d504ef203e06156b6a07af8b4cb"} Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.231571 4767 scope.go:117] "RemoveContainer" containerID="bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31" Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.231621 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b5zfm" Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.261096 4767 scope.go:117] "RemoveContainer" containerID="a61b6d6c512b0c56a05d05f1386b061d0d70d469b15b9f03d266e166c3260f10" Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.272457 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b5zfm"] Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.282008 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b5zfm"] Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.295268 4767 scope.go:117] "RemoveContainer" containerID="18b08b8f557b1ceb91b1bfc7b75f346403ce6ca04e111279ba85f0a40f24665e" Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.335390 4767 scope.go:117] "RemoveContainer" containerID="bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31" Jan 28 19:18:33 crc kubenswrapper[4767]: E0128 19:18:33.335954 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31\": container with ID starting with bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31 not found: ID does not exist" containerID="bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31" Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.336038 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31"} err="failed to get container status \"bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31\": rpc error: code = NotFound desc = could not find container \"bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31\": container with ID starting with bdf9c8589c00353a03f74cdfd0374d9f47a1f61633c613199b6fa7263173cf31 not found: ID does not exist" Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.336088 4767 scope.go:117] "RemoveContainer" containerID="a61b6d6c512b0c56a05d05f1386b061d0d70d469b15b9f03d266e166c3260f10" Jan 28 19:18:33 crc kubenswrapper[4767]: E0128 19:18:33.336630 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a61b6d6c512b0c56a05d05f1386b061d0d70d469b15b9f03d266e166c3260f10\": container with ID starting with a61b6d6c512b0c56a05d05f1386b061d0d70d469b15b9f03d266e166c3260f10 not found: ID does not exist" containerID="a61b6d6c512b0c56a05d05f1386b061d0d70d469b15b9f03d266e166c3260f10" Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.336695 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a61b6d6c512b0c56a05d05f1386b061d0d70d469b15b9f03d266e166c3260f10"} err="failed to get container status \"a61b6d6c512b0c56a05d05f1386b061d0d70d469b15b9f03d266e166c3260f10\": rpc error: code = NotFound desc = could not find container \"a61b6d6c512b0c56a05d05f1386b061d0d70d469b15b9f03d266e166c3260f10\": container with ID starting with a61b6d6c512b0c56a05d05f1386b061d0d70d469b15b9f03d266e166c3260f10 not found: ID does not exist" Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.336746 4767 scope.go:117] "RemoveContainer" containerID="18b08b8f557b1ceb91b1bfc7b75f346403ce6ca04e111279ba85f0a40f24665e" Jan 28 19:18:33 crc kubenswrapper[4767]: E0128 19:18:33.337357 4767 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"18b08b8f557b1ceb91b1bfc7b75f346403ce6ca04e111279ba85f0a40f24665e\": container with ID starting with 18b08b8f557b1ceb91b1bfc7b75f346403ce6ca04e111279ba85f0a40f24665e not found: ID does not exist" containerID="18b08b8f557b1ceb91b1bfc7b75f346403ce6ca04e111279ba85f0a40f24665e" Jan 28 19:18:33 crc kubenswrapper[4767]: I0128 19:18:33.337447 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18b08b8f557b1ceb91b1bfc7b75f346403ce6ca04e111279ba85f0a40f24665e"} err="failed to get container status \"18b08b8f557b1ceb91b1bfc7b75f346403ce6ca04e111279ba85f0a40f24665e\": rpc error: code = NotFound desc = could not find container \"18b08b8f557b1ceb91b1bfc7b75f346403ce6ca04e111279ba85f0a40f24665e\": container with ID starting with 18b08b8f557b1ceb91b1bfc7b75f346403ce6ca04e111279ba85f0a40f24665e not found: ID does not exist" Jan 28 19:18:34 crc kubenswrapper[4767]: I0128 19:18:34.807294 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="64a0df59-e7e7-4c4c-b497-8fcb259284d7" path="/var/lib/kubelet/pods/64a0df59-e7e7-4c4c-b497-8fcb259284d7/volumes" Jan 28 19:18:45 crc kubenswrapper[4767]: I0128 19:18:45.455857 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:18:45 crc kubenswrapper[4767]: I0128 19:18:45.456968 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:19:15 crc kubenswrapper[4767]: I0128 19:19:15.455949 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:19:15 crc kubenswrapper[4767]: I0128 19:19:15.456682 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:19:27 crc kubenswrapper[4767]: I0128 19:19:27.043906 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-79f6f75b9c-dhf5c_0d7c50d3-1348-43e5-a8fa-f05cd53d2a42/manager/0.log" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.280843 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.282298 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="7a0ada4b-d0dd-4984-8dd3-95a6814a45e1" containerName="openstackclient" containerID="cri-o://72b28aebca58d31b790efe757402aa16fa1a7dfb5722945d2ab0a9629f7c4ce1" gracePeriod=2 Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.302663 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/openstackclient"] Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.313569 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 28 19:19:29 crc kubenswrapper[4767]: E0128 19:19:29.314251 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64a0df59-e7e7-4c4c-b497-8fcb259284d7" containerName="extract-utilities" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.314288 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="64a0df59-e7e7-4c4c-b497-8fcb259284d7" containerName="extract-utilities" Jan 28 19:19:29 crc kubenswrapper[4767]: E0128 19:19:29.314332 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7a0ada4b-d0dd-4984-8dd3-95a6814a45e1" containerName="openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.314339 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="7a0ada4b-d0dd-4984-8dd3-95a6814a45e1" containerName="openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: E0128 19:19:29.314359 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64a0df59-e7e7-4c4c-b497-8fcb259284d7" containerName="extract-content" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.314366 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="64a0df59-e7e7-4c4c-b497-8fcb259284d7" containerName="extract-content" Jan 28 19:19:29 crc kubenswrapper[4767]: E0128 19:19:29.314381 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="64a0df59-e7e7-4c4c-b497-8fcb259284d7" containerName="registry-server" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.314388 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="64a0df59-e7e7-4c4c-b497-8fcb259284d7" containerName="registry-server" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.314600 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="7a0ada4b-d0dd-4984-8dd3-95a6814a45e1" containerName="openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.314615 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="64a0df59-e7e7-4c4c-b497-8fcb259284d7" containerName="registry-server" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.315447 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.327438 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.329651 4767 status_manager.go:875] "Failed to update status for pod" pod="openstack/openstackclient" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ac6ad33f-9bb6-4b24-994c-a8611affb68d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T19:19:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T19:19:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T19:19:29Z\\\",\\\"message\\\":\\\"containers with unready status: [openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T19:19:29Z\\\",\\\"message\\\":\\\"containers with unready status: [openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"openstackclient\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/clouds.yaml\\\",\\\"name\\\":\\\"openstack-config\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/secure.yaml\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/cloudrc\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem\\\",\\\"name\\\":\\\"combined-ca-bundle\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bn8vd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T19:19:29Z\\\"}}\" for pod \"openstack\"/\"openstackclient\": pods \"openstackclient\" not found" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.338892 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 28 19:19:29 crc kubenswrapper[4767]: E0128 19:19:29.344498 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle kube-api-access-bn8vd openstack-config openstack-config-secret], unattached volumes=[], failed to process volumes=[combined-ca-bundle kube-api-access-bn8vd openstack-config openstack-config-secret]: context canceled" pod="openstack/openstackclient" podUID="ac6ad33f-9bb6-4b24-994c-a8611affb68d" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.349285 4767 kubelet.go:2431] 
"SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.371895 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.373723 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.381861 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/98b184ce-a502-4335-a4fb-1475658bed4b-openstack-config\") pod \"openstackclient\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") " pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.381977 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98b184ce-a502-4335-a4fb-1475658bed4b-combined-ca-bundle\") pod \"openstackclient\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") " pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.382009 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kd47\" (UniqueName: \"kubernetes.io/projected/98b184ce-a502-4335-a4fb-1475658bed4b-kube-api-access-6kd47\") pod \"openstackclient\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") " pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.382040 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/98b184ce-a502-4335-a4fb-1475658bed4b-openstack-config-secret\") pod \"openstackclient\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") " pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.390275 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.407316 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="7a0ada4b-d0dd-4984-8dd3-95a6814a45e1" podUID="98b184ce-a502-4335-a4fb-1475658bed4b" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.424410 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="ac6ad33f-9bb6-4b24-994c-a8611affb68d" podUID="98b184ce-a502-4335-a4fb-1475658bed4b" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.484056 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/98b184ce-a502-4335-a4fb-1475658bed4b-openstack-config\") pod \"openstackclient\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") " pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.484200 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98b184ce-a502-4335-a4fb-1475658bed4b-combined-ca-bundle\") pod \"openstackclient\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") " pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.484342 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-6kd47\" (UniqueName: \"kubernetes.io/projected/98b184ce-a502-4335-a4fb-1475658bed4b-kube-api-access-6kd47\") pod \"openstackclient\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") " pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.484374 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/98b184ce-a502-4335-a4fb-1475658bed4b-openstack-config-secret\") pod \"openstackclient\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") " pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.486479 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/98b184ce-a502-4335-a4fb-1475658bed4b-openstack-config\") pod \"openstackclient\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") " pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.495079 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98b184ce-a502-4335-a4fb-1475658bed4b-combined-ca-bundle\") pod \"openstackclient\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") " pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.495108 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/98b184ce-a502-4335-a4fb-1475658bed4b-openstack-config-secret\") pod \"openstackclient\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") " pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.515130 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kd47\" (UniqueName: \"kubernetes.io/projected/98b184ce-a502-4335-a4fb-1475658bed4b-kube-api-access-6kd47\") pod \"openstackclient\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") " pod="openstack/openstackclient" Jan 28 19:19:29 crc kubenswrapper[4767]: I0128 19:19:29.703791 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.250075 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.333198 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.333285 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"98b184ce-a502-4335-a4fb-1475658bed4b","Type":"ContainerStarted","Data":"4c8960cd9d7be323b2a195b76d9957cd0147e1a21068316c49a258b220220058"} Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.338299 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="ac6ad33f-9bb6-4b24-994c-a8611affb68d" podUID="98b184ce-a502-4335-a4fb-1475658bed4b" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.349985 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.353867 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="ac6ad33f-9bb6-4b24-994c-a8611affb68d" podUID="98b184ce-a502-4335-a4fb-1475658bed4b" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.812187 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac6ad33f-9bb6-4b24-994c-a8611affb68d" path="/var/lib/kubelet/pods/ac6ad33f-9bb6-4b24-994c-a8611affb68d/volumes" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.816706 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-create-4dn5d"] Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.818662 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-4dn5d" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.820921 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-4dn5d"] Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.918901 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-1000-account-create-update-qv25f"] Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.920613 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-1000-account-create-update-qv25f" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.923726 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-db-secret" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.931974 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-1000-account-create-update-qv25f"] Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.933363 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k8rdr\" (UniqueName: \"kubernetes.io/projected/b83f436b-cd7e-4591-be88-69f803dde429-kube-api-access-k8rdr\") pod \"aodh-db-create-4dn5d\" (UID: \"b83f436b-cd7e-4591-be88-69f803dde429\") " pod="openstack/aodh-db-create-4dn5d" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.933472 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r44tn\" (UniqueName: \"kubernetes.io/projected/4174c5fe-9a20-41cf-b261-272c0c99734f-kube-api-access-r44tn\") pod \"aodh-1000-account-create-update-qv25f\" (UID: \"4174c5fe-9a20-41cf-b261-272c0c99734f\") " pod="openstack/aodh-1000-account-create-update-qv25f" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.933572 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4174c5fe-9a20-41cf-b261-272c0c99734f-operator-scripts\") pod \"aodh-1000-account-create-update-qv25f\" (UID: \"4174c5fe-9a20-41cf-b261-272c0c99734f\") " pod="openstack/aodh-1000-account-create-update-qv25f" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:30.933726 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b83f436b-cd7e-4591-be88-69f803dde429-operator-scripts\") pod \"aodh-db-create-4dn5d\" (UID: \"b83f436b-cd7e-4591-be88-69f803dde429\") " pod="openstack/aodh-db-create-4dn5d" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.035762 4767 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-r44tn\" (UniqueName: \"kubernetes.io/projected/4174c5fe-9a20-41cf-b261-272c0c99734f-kube-api-access-r44tn\") pod \"aodh-1000-account-create-update-qv25f\" (UID: \"4174c5fe-9a20-41cf-b261-272c0c99734f\") " pod="openstack/aodh-1000-account-create-update-qv25f" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.035833 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4174c5fe-9a20-41cf-b261-272c0c99734f-operator-scripts\") pod \"aodh-1000-account-create-update-qv25f\" (UID: \"4174c5fe-9a20-41cf-b261-272c0c99734f\") " pod="openstack/aodh-1000-account-create-update-qv25f" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.035902 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b83f436b-cd7e-4591-be88-69f803dde429-operator-scripts\") pod \"aodh-db-create-4dn5d\" (UID: \"b83f436b-cd7e-4591-be88-69f803dde429\") " pod="openstack/aodh-db-create-4dn5d" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.036050 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k8rdr\" (UniqueName: \"kubernetes.io/projected/b83f436b-cd7e-4591-be88-69f803dde429-kube-api-access-k8rdr\") pod \"aodh-db-create-4dn5d\" (UID: \"b83f436b-cd7e-4591-be88-69f803dde429\") " pod="openstack/aodh-db-create-4dn5d" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.037226 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4174c5fe-9a20-41cf-b261-272c0c99734f-operator-scripts\") pod \"aodh-1000-account-create-update-qv25f\" (UID: \"4174c5fe-9a20-41cf-b261-272c0c99734f\") " pod="openstack/aodh-1000-account-create-update-qv25f" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.037275 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b83f436b-cd7e-4591-be88-69f803dde429-operator-scripts\") pod \"aodh-db-create-4dn5d\" (UID: \"b83f436b-cd7e-4591-be88-69f803dde429\") " pod="openstack/aodh-db-create-4dn5d" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.057365 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k8rdr\" (UniqueName: \"kubernetes.io/projected/b83f436b-cd7e-4591-be88-69f803dde429-kube-api-access-k8rdr\") pod \"aodh-db-create-4dn5d\" (UID: \"b83f436b-cd7e-4591-be88-69f803dde429\") " pod="openstack/aodh-db-create-4dn5d" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.063315 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r44tn\" (UniqueName: \"kubernetes.io/projected/4174c5fe-9a20-41cf-b261-272c0c99734f-kube-api-access-r44tn\") pod \"aodh-1000-account-create-update-qv25f\" (UID: \"4174c5fe-9a20-41cf-b261-272c0c99734f\") " pod="openstack/aodh-1000-account-create-update-qv25f" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.158002 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-create-4dn5d" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.263494 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-1000-account-create-update-qv25f" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.355994 4767 generic.go:334] "Generic (PLEG): container finished" podID="7a0ada4b-d0dd-4984-8dd3-95a6814a45e1" containerID="72b28aebca58d31b790efe757402aa16fa1a7dfb5722945d2ab0a9629f7c4ce1" exitCode=137 Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.454733 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.455047 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"98b184ce-a502-4335-a4fb-1475658bed4b","Type":"ContainerStarted","Data":"1b9ad0d125732d9a9b87a9cd1f51ffec115d3b7716227021b9f186c061bc5d96"} Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.475283 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="ac6ad33f-9bb6-4b24-994c-a8611affb68d" podUID="98b184ce-a502-4335-a4fb-1475658bed4b" Jan 28 19:19:31 crc kubenswrapper[4767]: W0128 19:19:31.789300 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb83f436b_cd7e_4591_be88_69f803dde429.slice/crio-df790d2cffb188ee7ceedf69218eb72622509418d9ec5be8d10ada3eadee4f04 WatchSource:0}: Error finding container df790d2cffb188ee7ceedf69218eb72622509418d9ec5be8d10ada3eadee4f04: Status 404 returned error can't find the container with id df790d2cffb188ee7ceedf69218eb72622509418d9ec5be8d10ada3eadee4f04 Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.792821 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.792788791 podStartE2EDuration="2.792788791s" podCreationTimestamp="2026-01-28 19:19:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 19:19:31.47130306 +0000 UTC m=+2977.435485934" watchObservedRunningTime="2026-01-28 19:19:31.792788791 +0000 UTC m=+2977.756971655" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.800233 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-1000-account-create-update-qv25f"] Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.828058 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-create-4dn5d"] Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.889807 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.980533 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-openstack-config\") pod \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.981134 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-combined-ca-bundle\") pod \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.981264 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26r4f\" (UniqueName: \"kubernetes.io/projected/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-kube-api-access-26r4f\") pod \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.981409 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-openstack-config-secret\") pod \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\" (UID: \"7a0ada4b-d0dd-4984-8dd3-95a6814a45e1\") " Jan 28 19:19:31 crc kubenswrapper[4767]: I0128 19:19:31.994810 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-kube-api-access-26r4f" (OuterVolumeSpecName: "kube-api-access-26r4f") pod "7a0ada4b-d0dd-4984-8dd3-95a6814a45e1" (UID: "7a0ada4b-d0dd-4984-8dd3-95a6814a45e1"). InnerVolumeSpecName "kube-api-access-26r4f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.021249 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "7a0ada4b-d0dd-4984-8dd3-95a6814a45e1" (UID: "7a0ada4b-d0dd-4984-8dd3-95a6814a45e1"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.030094 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7a0ada4b-d0dd-4984-8dd3-95a6814a45e1" (UID: "7a0ada4b-d0dd-4984-8dd3-95a6814a45e1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.047957 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "7a0ada4b-d0dd-4984-8dd3-95a6814a45e1" (UID: "7a0ada4b-d0dd-4984-8dd3-95a6814a45e1"). InnerVolumeSpecName "openstack-config-secret". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.084547 4767 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.084609 4767 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.084625 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.084643 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26r4f\" (UniqueName: \"kubernetes.io/projected/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1-kube-api-access-26r4f\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.467190 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-4dn5d" event={"ID":"b83f436b-cd7e-4591-be88-69f803dde429","Type":"ContainerStarted","Data":"f02ce9d0c1fed72b255ca5cefc86570c6a36859d50d5613b3e6096cb76d44929"} Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.467254 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-4dn5d" event={"ID":"b83f436b-cd7e-4591-be88-69f803dde429","Type":"ContainerStarted","Data":"df790d2cffb188ee7ceedf69218eb72622509418d9ec5be8d10ada3eadee4f04"} Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.472825 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-1000-account-create-update-qv25f" event={"ID":"4174c5fe-9a20-41cf-b261-272c0c99734f","Type":"ContainerStarted","Data":"b6930c79f40b830058e6ca30ae1f47807208a7f869e6d15809859bc7a5a28f1b"} Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.473062 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-1000-account-create-update-qv25f" event={"ID":"4174c5fe-9a20-41cf-b261-272c0c99734f","Type":"ContainerStarted","Data":"23ed52824f84a3ed0ea64af7ad65656377a870a9761024a72808a4351ef69dfe"} Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.475265 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.475281 4767 scope.go:117] "RemoveContainer" containerID="72b28aebca58d31b790efe757402aa16fa1a7dfb5722945d2ab0a9629f7c4ce1" Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.503662 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-create-4dn5d" podStartSLOduration=2.50363284 podStartE2EDuration="2.50363284s" podCreationTimestamp="2026-01-28 19:19:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 19:19:32.488407109 +0000 UTC m=+2978.452589983" watchObservedRunningTime="2026-01-28 19:19:32.50363284 +0000 UTC m=+2978.467815714" Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.659391 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="7a0ada4b-d0dd-4984-8dd3-95a6814a45e1" podUID="98b184ce-a502-4335-a4fb-1475658bed4b" Jan 28 19:19:32 crc kubenswrapper[4767]: I0128 19:19:32.810239 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7a0ada4b-d0dd-4984-8dd3-95a6814a45e1" path="/var/lib/kubelet/pods/7a0ada4b-d0dd-4984-8dd3-95a6814a45e1/volumes" Jan 28 19:19:33 crc kubenswrapper[4767]: I0128 19:19:33.488354 4767 generic.go:334] "Generic (PLEG): container finished" podID="4174c5fe-9a20-41cf-b261-272c0c99734f" containerID="b6930c79f40b830058e6ca30ae1f47807208a7f869e6d15809859bc7a5a28f1b" exitCode=0 Jan 28 19:19:33 crc kubenswrapper[4767]: I0128 19:19:33.488429 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-1000-account-create-update-qv25f" event={"ID":"4174c5fe-9a20-41cf-b261-272c0c99734f","Type":"ContainerDied","Data":"b6930c79f40b830058e6ca30ae1f47807208a7f869e6d15809859bc7a5a28f1b"} Jan 28 19:19:33 crc kubenswrapper[4767]: I0128 19:19:33.492643 4767 generic.go:334] "Generic (PLEG): container finished" podID="b83f436b-cd7e-4591-be88-69f803dde429" containerID="f02ce9d0c1fed72b255ca5cefc86570c6a36859d50d5613b3e6096cb76d44929" exitCode=0 Jan 28 19:19:33 crc kubenswrapper[4767]: I0128 19:19:33.492699 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-4dn5d" event={"ID":"b83f436b-cd7e-4591-be88-69f803dde429","Type":"ContainerDied","Data":"f02ce9d0c1fed72b255ca5cefc86570c6a36859d50d5613b3e6096cb76d44929"} Jan 28 19:19:34 crc kubenswrapper[4767]: I0128 19:19:34.938055 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-1000-account-create-update-qv25f" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.044781 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-4dn5d" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.072809 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r44tn\" (UniqueName: \"kubernetes.io/projected/4174c5fe-9a20-41cf-b261-272c0c99734f-kube-api-access-r44tn\") pod \"4174c5fe-9a20-41cf-b261-272c0c99734f\" (UID: \"4174c5fe-9a20-41cf-b261-272c0c99734f\") " Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.073285 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4174c5fe-9a20-41cf-b261-272c0c99734f-operator-scripts\") pod \"4174c5fe-9a20-41cf-b261-272c0c99734f\" (UID: \"4174c5fe-9a20-41cf-b261-272c0c99734f\") " Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.073982 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4174c5fe-9a20-41cf-b261-272c0c99734f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4174c5fe-9a20-41cf-b261-272c0c99734f" (UID: "4174c5fe-9a20-41cf-b261-272c0c99734f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.083692 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4174c5fe-9a20-41cf-b261-272c0c99734f-kube-api-access-r44tn" (OuterVolumeSpecName: "kube-api-access-r44tn") pod "4174c5fe-9a20-41cf-b261-272c0c99734f" (UID: "4174c5fe-9a20-41cf-b261-272c0c99734f"). InnerVolumeSpecName "kube-api-access-r44tn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.175832 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b83f436b-cd7e-4591-be88-69f803dde429-operator-scripts\") pod \"b83f436b-cd7e-4591-be88-69f803dde429\" (UID: \"b83f436b-cd7e-4591-be88-69f803dde429\") " Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.176010 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k8rdr\" (UniqueName: \"kubernetes.io/projected/b83f436b-cd7e-4591-be88-69f803dde429-kube-api-access-k8rdr\") pod \"b83f436b-cd7e-4591-be88-69f803dde429\" (UID: \"b83f436b-cd7e-4591-be88-69f803dde429\") " Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.176721 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b83f436b-cd7e-4591-be88-69f803dde429-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b83f436b-cd7e-4591-be88-69f803dde429" (UID: "b83f436b-cd7e-4591-be88-69f803dde429"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.176753 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r44tn\" (UniqueName: \"kubernetes.io/projected/4174c5fe-9a20-41cf-b261-272c0c99734f-kube-api-access-r44tn\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.176833 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4174c5fe-9a20-41cf-b261-272c0c99734f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.180600 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b83f436b-cd7e-4591-be88-69f803dde429-kube-api-access-k8rdr" (OuterVolumeSpecName: "kube-api-access-k8rdr") pod "b83f436b-cd7e-4591-be88-69f803dde429" (UID: "b83f436b-cd7e-4591-be88-69f803dde429"). InnerVolumeSpecName "kube-api-access-k8rdr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.278714 4767 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b83f436b-cd7e-4591-be88-69f803dde429-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.278766 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k8rdr\" (UniqueName: \"kubernetes.io/projected/b83f436b-cd7e-4591-be88-69f803dde429-kube-api-access-k8rdr\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.524657 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-1000-account-create-update-qv25f" event={"ID":"4174c5fe-9a20-41cf-b261-272c0c99734f","Type":"ContainerDied","Data":"23ed52824f84a3ed0ea64af7ad65656377a870a9761024a72808a4351ef69dfe"} Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.524735 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="23ed52824f84a3ed0ea64af7ad65656377a870a9761024a72808a4351ef69dfe" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.524767 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-1000-account-create-update-qv25f" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.531508 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-create-4dn5d" Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.531468 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-create-4dn5d" event={"ID":"b83f436b-cd7e-4591-be88-69f803dde429","Type":"ContainerDied","Data":"df790d2cffb188ee7ceedf69218eb72622509418d9ec5be8d10ada3eadee4f04"} Jan 28 19:19:35 crc kubenswrapper[4767]: I0128 19:19:35.533028 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="df790d2cffb188ee7ceedf69218eb72622509418d9ec5be8d10ada3eadee4f04" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.382017 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-db-sync-trb59"] Jan 28 19:19:36 crc kubenswrapper[4767]: E0128 19:19:36.383347 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4174c5fe-9a20-41cf-b261-272c0c99734f" containerName="mariadb-account-create-update" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.383369 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="4174c5fe-9a20-41cf-b261-272c0c99734f" containerName="mariadb-account-create-update" Jan 28 19:19:36 crc kubenswrapper[4767]: E0128 19:19:36.383417 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b83f436b-cd7e-4591-be88-69f803dde429" containerName="mariadb-database-create" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.383427 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b83f436b-cd7e-4591-be88-69f803dde429" containerName="mariadb-database-create" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.383647 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b83f436b-cd7e-4591-be88-69f803dde429" containerName="mariadb-database-create" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.383673 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="4174c5fe-9a20-41cf-b261-272c0c99734f" containerName="mariadb-account-create-update" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.384648 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.387261 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.387924 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.388061 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.388444 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-chl48" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.397445 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-trb59"] Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.527528 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-scripts\") pod \"aodh-db-sync-trb59\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.527619 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tc9g\" (UniqueName: \"kubernetes.io/projected/d099bfa1-60e9-4edd-8457-d3825393e7d0-kube-api-access-8tc9g\") pod \"aodh-db-sync-trb59\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.528475 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-config-data\") pod \"aodh-db-sync-trb59\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.528770 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-combined-ca-bundle\") pod \"aodh-db-sync-trb59\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.631330 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-scripts\") pod \"aodh-db-sync-trb59\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.631653 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tc9g\" (UniqueName: \"kubernetes.io/projected/d099bfa1-60e9-4edd-8457-d3825393e7d0-kube-api-access-8tc9g\") pod \"aodh-db-sync-trb59\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.631803 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-config-data\") pod \"aodh-db-sync-trb59\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: 
I0128 19:19:36.632055 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-combined-ca-bundle\") pod \"aodh-db-sync-trb59\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.639426 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-scripts\") pod \"aodh-db-sync-trb59\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.640455 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-config-data\") pod \"aodh-db-sync-trb59\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.644066 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-combined-ca-bundle\") pod \"aodh-db-sync-trb59\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.650150 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tc9g\" (UniqueName: \"kubernetes.io/projected/d099bfa1-60e9-4edd-8457-d3825393e7d0-kube-api-access-8tc9g\") pod \"aodh-db-sync-trb59\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:36 crc kubenswrapper[4767]: I0128 19:19:36.718740 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:37 crc kubenswrapper[4767]: I0128 19:19:37.244712 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-db-sync-trb59"] Jan 28 19:19:37 crc kubenswrapper[4767]: I0128 19:19:37.555254 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-trb59" event={"ID":"d099bfa1-60e9-4edd-8457-d3825393e7d0","Type":"ContainerStarted","Data":"2b65cb7fc13fde0292a9d88604e496c5bacd6ab53b49d1744c46794c4e02f4a0"} Jan 28 19:19:42 crc kubenswrapper[4767]: I0128 19:19:42.644560 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-trb59" event={"ID":"d099bfa1-60e9-4edd-8457-d3825393e7d0","Type":"ContainerStarted","Data":"eed5a0c7aa341e0be51fdb9cb9bc7d01dad5bb689cfb7d7fb6388ef6ae4966c4"} Jan 28 19:19:42 crc kubenswrapper[4767]: I0128 19:19:42.675893 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-db-sync-trb59" podStartSLOduration=2.092626603 podStartE2EDuration="6.675867183s" podCreationTimestamp="2026-01-28 19:19:36 +0000 UTC" firstStartedPulling="2026-01-28 19:19:37.257325821 +0000 UTC m=+2983.221508695" lastFinishedPulling="2026-01-28 19:19:41.840566411 +0000 UTC m=+2987.804749275" observedRunningTime="2026-01-28 19:19:42.663055919 +0000 UTC m=+2988.627238813" watchObservedRunningTime="2026-01-28 19:19:42.675867183 +0000 UTC m=+2988.640050067" Jan 28 19:19:44 crc kubenswrapper[4767]: I0128 19:19:44.669030 4767 generic.go:334] "Generic (PLEG): container finished" podID="d099bfa1-60e9-4edd-8457-d3825393e7d0" containerID="eed5a0c7aa341e0be51fdb9cb9bc7d01dad5bb689cfb7d7fb6388ef6ae4966c4" exitCode=0 Jan 28 19:19:44 crc kubenswrapper[4767]: I0128 19:19:44.669781 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-trb59" event={"ID":"d099bfa1-60e9-4edd-8457-d3825393e7d0","Type":"ContainerDied","Data":"eed5a0c7aa341e0be51fdb9cb9bc7d01dad5bb689cfb7d7fb6388ef6ae4966c4"} Jan 28 19:19:45 crc kubenswrapper[4767]: I0128 19:19:45.455188 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:19:45 crc kubenswrapper[4767]: I0128 19:19:45.455311 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:19:45 crc kubenswrapper[4767]: I0128 19:19:45.455397 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 19:19:45 crc kubenswrapper[4767]: I0128 19:19:45.456609 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 19:19:45 crc kubenswrapper[4767]: I0128 19:19:45.456706 4767 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" gracePeriod=600 Jan 28 19:19:45 crc kubenswrapper[4767]: E0128 19:19:45.600369 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:19:45 crc kubenswrapper[4767]: I0128 19:19:45.683821 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" exitCode=0 Jan 28 19:19:45 crc kubenswrapper[4767]: I0128 19:19:45.683856 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e"} Jan 28 19:19:45 crc kubenswrapper[4767]: I0128 19:19:45.683963 4767 scope.go:117] "RemoveContainer" containerID="9b30692c20237f6d3a61c844c8c559261899c03e8d7843270da599625da8d099" Jan 28 19:19:45 crc kubenswrapper[4767]: I0128 19:19:45.685107 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:19:45 crc kubenswrapper[4767]: E0128 19:19:45.685749 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.113184 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.211188 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-combined-ca-bundle\") pod \"d099bfa1-60e9-4edd-8457-d3825393e7d0\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.211441 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tc9g\" (UniqueName: \"kubernetes.io/projected/d099bfa1-60e9-4edd-8457-d3825393e7d0-kube-api-access-8tc9g\") pod \"d099bfa1-60e9-4edd-8457-d3825393e7d0\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.211641 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-config-data\") pod \"d099bfa1-60e9-4edd-8457-d3825393e7d0\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.211803 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-scripts\") pod \"d099bfa1-60e9-4edd-8457-d3825393e7d0\" (UID: \"d099bfa1-60e9-4edd-8457-d3825393e7d0\") " Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.222487 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d099bfa1-60e9-4edd-8457-d3825393e7d0-kube-api-access-8tc9g" (OuterVolumeSpecName: "kube-api-access-8tc9g") pod "d099bfa1-60e9-4edd-8457-d3825393e7d0" (UID: "d099bfa1-60e9-4edd-8457-d3825393e7d0"). InnerVolumeSpecName "kube-api-access-8tc9g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.224135 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-scripts" (OuterVolumeSpecName: "scripts") pod "d099bfa1-60e9-4edd-8457-d3825393e7d0" (UID: "d099bfa1-60e9-4edd-8457-d3825393e7d0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.250505 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d099bfa1-60e9-4edd-8457-d3825393e7d0" (UID: "d099bfa1-60e9-4edd-8457-d3825393e7d0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.250957 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-config-data" (OuterVolumeSpecName: "config-data") pod "d099bfa1-60e9-4edd-8457-d3825393e7d0" (UID: "d099bfa1-60e9-4edd-8457-d3825393e7d0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.314290 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.314364 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.314378 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d099bfa1-60e9-4edd-8457-d3825393e7d0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.314392 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tc9g\" (UniqueName: \"kubernetes.io/projected/d099bfa1-60e9-4edd-8457-d3825393e7d0-kube-api-access-8tc9g\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.699198 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-db-sync-trb59" event={"ID":"d099bfa1-60e9-4edd-8457-d3825393e7d0","Type":"ContainerDied","Data":"2b65cb7fc13fde0292a9d88604e496c5bacd6ab53b49d1744c46794c4e02f4a0"} Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.699678 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2b65cb7fc13fde0292a9d88604e496c5bacd6ab53b49d1744c46794c4e02f4a0" Jan 28 19:19:46 crc kubenswrapper[4767]: I0128 19:19:46.699263 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-db-sync-trb59" Jan 28 19:19:50 crc kubenswrapper[4767]: I0128 19:19:50.879154 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 28 19:19:50 crc kubenswrapper[4767]: E0128 19:19:50.880353 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d099bfa1-60e9-4edd-8457-d3825393e7d0" containerName="aodh-db-sync" Jan 28 19:19:50 crc kubenswrapper[4767]: I0128 19:19:50.880372 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="d099bfa1-60e9-4edd-8457-d3825393e7d0" containerName="aodh-db-sync" Jan 28 19:19:50 crc kubenswrapper[4767]: I0128 19:19:50.880624 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="d099bfa1-60e9-4edd-8457-d3825393e7d0" containerName="aodh-db-sync" Jan 28 19:19:50 crc kubenswrapper[4767]: I0128 19:19:50.886125 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 28 19:19:50 crc kubenswrapper[4767]: I0128 19:19:50.891754 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 28 19:19:50 crc kubenswrapper[4767]: I0128 19:19:50.892002 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-chl48" Jan 28 19:19:50 crc kubenswrapper[4767]: I0128 19:19:50.892517 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 28 19:19:50 crc kubenswrapper[4767]: I0128 19:19:50.912752 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 28 19:19:50 crc kubenswrapper[4767]: I0128 19:19:50.919377 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cl7pz\" (UniqueName: \"kubernetes.io/projected/1b4fbadd-8296-4437-9419-d0c94991808b-kube-api-access-cl7pz\") pod \"aodh-0\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " pod="openstack/aodh-0" Jan 28 19:19:50 crc kubenswrapper[4767]: I0128 19:19:50.919612 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-config-data\") pod \"aodh-0\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " pod="openstack/aodh-0" Jan 28 19:19:50 crc kubenswrapper[4767]: I0128 19:19:50.919659 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-combined-ca-bundle\") pod \"aodh-0\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " pod="openstack/aodh-0" Jan 28 19:19:50 crc kubenswrapper[4767]: I0128 19:19:50.919736 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-scripts\") pod \"aodh-0\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " pod="openstack/aodh-0" Jan 28 19:19:51 crc kubenswrapper[4767]: I0128 19:19:51.021964 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cl7pz\" (UniqueName: \"kubernetes.io/projected/1b4fbadd-8296-4437-9419-d0c94991808b-kube-api-access-cl7pz\") pod \"aodh-0\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " pod="openstack/aodh-0" Jan 28 19:19:51 crc kubenswrapper[4767]: I0128 19:19:51.022139 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-config-data\") pod \"aodh-0\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " pod="openstack/aodh-0" Jan 28 19:19:51 crc kubenswrapper[4767]: I0128 19:19:51.022224 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-combined-ca-bundle\") pod \"aodh-0\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " pod="openstack/aodh-0" Jan 28 19:19:51 crc kubenswrapper[4767]: I0128 19:19:51.022282 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-scripts\") pod \"aodh-0\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " pod="openstack/aodh-0" Jan 28 19:19:51 crc kubenswrapper[4767]: 
I0128 19:19:51.031661 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-combined-ca-bundle\") pod \"aodh-0\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " pod="openstack/aodh-0" Jan 28 19:19:51 crc kubenswrapper[4767]: I0128 19:19:51.052015 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-config-data\") pod \"aodh-0\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " pod="openstack/aodh-0" Jan 28 19:19:51 crc kubenswrapper[4767]: I0128 19:19:51.052243 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-scripts\") pod \"aodh-0\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " pod="openstack/aodh-0" Jan 28 19:19:51 crc kubenswrapper[4767]: I0128 19:19:51.052793 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cl7pz\" (UniqueName: \"kubernetes.io/projected/1b4fbadd-8296-4437-9419-d0c94991808b-kube-api-access-cl7pz\") pod \"aodh-0\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " pod="openstack/aodh-0" Jan 28 19:19:51 crc kubenswrapper[4767]: I0128 19:19:51.232092 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 19:19:51 crc kubenswrapper[4767]: I0128 19:19:51.844716 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 28 19:19:51 crc kubenswrapper[4767]: W0128 19:19:51.857100 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b4fbadd_8296_4437_9419_d0c94991808b.slice/crio-b6e0cd2ad66b88987771c715ecb71bb427ea31d00b166e821bd4524080de1d8d WatchSource:0}: Error finding container b6e0cd2ad66b88987771c715ecb71bb427ea31d00b166e821bd4524080de1d8d: Status 404 returned error can't find the container with id b6e0cd2ad66b88987771c715ecb71bb427ea31d00b166e821bd4524080de1d8d Jan 28 19:19:52 crc kubenswrapper[4767]: I0128 19:19:52.769295 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"1b4fbadd-8296-4437-9419-d0c94991808b","Type":"ContainerStarted","Data":"b6e0cd2ad66b88987771c715ecb71bb427ea31d00b166e821bd4524080de1d8d"} Jan 28 19:19:53 crc kubenswrapper[4767]: I0128 19:19:53.441133 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 19:19:53 crc kubenswrapper[4767]: I0128 19:19:53.446672 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="ceilometer-central-agent" containerID="cri-o://2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced" gracePeriod=30 Jan 28 19:19:53 crc kubenswrapper[4767]: I0128 19:19:53.446672 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="proxy-httpd" containerID="cri-o://ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b" gracePeriod=30 Jan 28 19:19:53 crc kubenswrapper[4767]: I0128 19:19:53.446884 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="ceilometer-notification-agent" 
containerID="cri-o://d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300" gracePeriod=30 Jan 28 19:19:53 crc kubenswrapper[4767]: I0128 19:19:53.446868 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="sg-core" containerID="cri-o://6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb" gracePeriod=30 Jan 28 19:19:53 crc kubenswrapper[4767]: I0128 19:19:53.784485 4767 generic.go:334] "Generic (PLEG): container finished" podID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerID="ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b" exitCode=0 Jan 28 19:19:53 crc kubenswrapper[4767]: I0128 19:19:53.784530 4767 generic.go:334] "Generic (PLEG): container finished" podID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerID="6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb" exitCode=2 Jan 28 19:19:53 crc kubenswrapper[4767]: I0128 19:19:53.784592 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6740b200-cccc-45bb-84c7-a524cf79fcff","Type":"ContainerDied","Data":"ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b"} Jan 28 19:19:53 crc kubenswrapper[4767]: I0128 19:19:53.784628 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6740b200-cccc-45bb-84c7-a524cf79fcff","Type":"ContainerDied","Data":"6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb"} Jan 28 19:19:53 crc kubenswrapper[4767]: I0128 19:19:53.786834 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"1b4fbadd-8296-4437-9419-d0c94991808b","Type":"ContainerStarted","Data":"48893babeb8d2fb2415c7fed1650433fb26b8f8414c16398df40b0e5fd7f4041"} Jan 28 19:19:54 crc kubenswrapper[4767]: I0128 19:19:54.861743 4767 generic.go:334] "Generic (PLEG): container finished" podID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerID="2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced" exitCode=0 Jan 28 19:19:54 crc kubenswrapper[4767]: I0128 19:19:54.890053 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6740b200-cccc-45bb-84c7-a524cf79fcff","Type":"ContainerDied","Data":"2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced"} Jan 28 19:19:54 crc kubenswrapper[4767]: I0128 19:19:54.901907 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.621508 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.853140 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6740b200-cccc-45bb-84c7-a524cf79fcff-run-httpd\") pod \"6740b200-cccc-45bb-84c7-a524cf79fcff\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.854539 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6740b200-cccc-45bb-84c7-a524cf79fcff-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6740b200-cccc-45bb-84c7-a524cf79fcff" (UID: "6740b200-cccc-45bb-84c7-a524cf79fcff"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.855165 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9v9g2\" (UniqueName: \"kubernetes.io/projected/6740b200-cccc-45bb-84c7-a524cf79fcff-kube-api-access-9v9g2\") pod \"6740b200-cccc-45bb-84c7-a524cf79fcff\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.855326 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6740b200-cccc-45bb-84c7-a524cf79fcff-log-httpd\") pod \"6740b200-cccc-45bb-84c7-a524cf79fcff\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.855464 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-ceilometer-tls-certs\") pod \"6740b200-cccc-45bb-84c7-a524cf79fcff\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.855537 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-scripts\") pod \"6740b200-cccc-45bb-84c7-a524cf79fcff\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.855566 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-sg-core-conf-yaml\") pod \"6740b200-cccc-45bb-84c7-a524cf79fcff\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.855693 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-combined-ca-bundle\") pod \"6740b200-cccc-45bb-84c7-a524cf79fcff\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.855719 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-config-data\") pod \"6740b200-cccc-45bb-84c7-a524cf79fcff\" (UID: \"6740b200-cccc-45bb-84c7-a524cf79fcff\") " Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.856153 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6740b200-cccc-45bb-84c7-a524cf79fcff-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6740b200-cccc-45bb-84c7-a524cf79fcff" (UID: "6740b200-cccc-45bb-84c7-a524cf79fcff"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.858199 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6740b200-cccc-45bb-84c7-a524cf79fcff-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.858289 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6740b200-cccc-45bb-84c7-a524cf79fcff-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.863475 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6740b200-cccc-45bb-84c7-a524cf79fcff-kube-api-access-9v9g2" (OuterVolumeSpecName: "kube-api-access-9v9g2") pod "6740b200-cccc-45bb-84c7-a524cf79fcff" (UID: "6740b200-cccc-45bb-84c7-a524cf79fcff"). InnerVolumeSpecName "kube-api-access-9v9g2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.885025 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-scripts" (OuterVolumeSpecName: "scripts") pod "6740b200-cccc-45bb-84c7-a524cf79fcff" (UID: "6740b200-cccc-45bb-84c7-a524cf79fcff"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.902480 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6740b200-cccc-45bb-84c7-a524cf79fcff" (UID: "6740b200-cccc-45bb-84c7-a524cf79fcff"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.903997 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"1b4fbadd-8296-4437-9419-d0c94991808b","Type":"ContainerStarted","Data":"bb1d31e891fbaccd45ced725b7ec1afbb0d8dad4b7abcebf2f5687f7b4e121dd"} Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.917067 4767 generic.go:334] "Generic (PLEG): container finished" podID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerID="d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300" exitCode=0 Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.919506 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6740b200-cccc-45bb-84c7-a524cf79fcff","Type":"ContainerDied","Data":"d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300"} Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.919559 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6740b200-cccc-45bb-84c7-a524cf79fcff","Type":"ContainerDied","Data":"a6aae9074650910a6e1718660d7339bfb36d1dd788e537f7bf38b750af15c5af"} Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.919584 4767 scope.go:117] "RemoveContainer" containerID="ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.919743 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.955198 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "6740b200-cccc-45bb-84c7-a524cf79fcff" (UID: "6740b200-cccc-45bb-84c7-a524cf79fcff"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.963775 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9v9g2\" (UniqueName: \"kubernetes.io/projected/6740b200-cccc-45bb-84c7-a524cf79fcff-kube-api-access-9v9g2\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.963813 4767 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.963867 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.963880 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:55 crc kubenswrapper[4767]: I0128 19:19:55.982120 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6740b200-cccc-45bb-84c7-a524cf79fcff" (UID: "6740b200-cccc-45bb-84c7-a524cf79fcff"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.007036 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-config-data" (OuterVolumeSpecName: "config-data") pod "6740b200-cccc-45bb-84c7-a524cf79fcff" (UID: "6740b200-cccc-45bb-84c7-a524cf79fcff"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.048558 4767 scope.go:117] "RemoveContainer" containerID="6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.066054 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.066363 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6740b200-cccc-45bb-84c7-a524cf79fcff-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.088266 4767 scope.go:117] "RemoveContainer" containerID="d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.123849 4767 scope.go:117] "RemoveContainer" containerID="2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.148700 4767 scope.go:117] "RemoveContainer" containerID="ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b" Jan 28 19:19:56 crc kubenswrapper[4767]: E0128 19:19:56.149462 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b\": container with ID starting with ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b not found: ID does not exist" containerID="ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.149510 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b"} err="failed to get container status \"ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b\": rpc error: code = NotFound desc = could not find container \"ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b\": container with ID starting with ce6d018f69f4d41dc79823f8da618da2ab38d388e6baed86ece273b9559b9f2b not found: ID does not exist" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.149540 4767 scope.go:117] "RemoveContainer" containerID="6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb" Jan 28 19:19:56 crc kubenswrapper[4767]: E0128 19:19:56.150342 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb\": container with ID starting with 6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb not found: ID does not exist" containerID="6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.150371 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb"} err="failed to get container status \"6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb\": rpc error: code = NotFound desc = could not find container \"6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb\": container with ID starting with 
6039e6edf6b0e4b416cb6c5ad0551474c3e22a45d875ac0bd90cbbac8ead84eb not found: ID does not exist" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.150388 4767 scope.go:117] "RemoveContainer" containerID="d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300" Jan 28 19:19:56 crc kubenswrapper[4767]: E0128 19:19:56.150664 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300\": container with ID starting with d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300 not found: ID does not exist" containerID="d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.150688 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300"} err="failed to get container status \"d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300\": rpc error: code = NotFound desc = could not find container \"d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300\": container with ID starting with d6b6a46652fbe2ec1bd1ba462775f7296b081664b6feb972588fdc8c8d641300 not found: ID does not exist" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.150703 4767 scope.go:117] "RemoveContainer" containerID="2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced" Jan 28 19:19:56 crc kubenswrapper[4767]: E0128 19:19:56.151134 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced\": container with ID starting with 2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced not found: ID does not exist" containerID="2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.151161 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced"} err="failed to get container status \"2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced\": rpc error: code = NotFound desc = could not find container \"2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced\": container with ID starting with 2db13410edd0980a70ec88652d35373fb9fd27018213f557d886c3059825cced not found: ID does not exist" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.269971 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.281893 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.308368 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 19:19:56 crc kubenswrapper[4767]: E0128 19:19:56.309156 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="proxy-httpd" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.309179 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="proxy-httpd" Jan 28 19:19:56 crc kubenswrapper[4767]: E0128 19:19:56.309194 4767 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="sg-core" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.309214 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="sg-core" Jan 28 19:19:56 crc kubenswrapper[4767]: E0128 19:19:56.309241 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="ceilometer-central-agent" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.309249 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="ceilometer-central-agent" Jan 28 19:19:56 crc kubenswrapper[4767]: E0128 19:19:56.309268 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="ceilometer-notification-agent" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.309274 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="ceilometer-notification-agent" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.309507 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="ceilometer-notification-agent" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.309542 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="sg-core" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.309554 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="proxy-httpd" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.309574 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" containerName="ceilometer-central-agent" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.311708 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.315366 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.316080 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.317962 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.353379 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.374248 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlw8n\" (UniqueName: \"kubernetes.io/projected/683ed23f-00ab-4e12-b807-d08d46697384-kube-api-access-wlw8n\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.374363 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-config-data\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.374399 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/683ed23f-00ab-4e12-b807-d08d46697384-run-httpd\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.374476 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.374514 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.374536 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-scripts\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.374606 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/683ed23f-00ab-4e12-b807-d08d46697384-log-httpd\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.374641 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.479563 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlw8n\" (UniqueName: \"kubernetes.io/projected/683ed23f-00ab-4e12-b807-d08d46697384-kube-api-access-wlw8n\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.479703 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-config-data\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.479742 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/683ed23f-00ab-4e12-b807-d08d46697384-run-httpd\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.479830 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.479886 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.479928 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-scripts\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.480012 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/683ed23f-00ab-4e12-b807-d08d46697384-log-httpd\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.480050 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.481144 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/683ed23f-00ab-4e12-b807-d08d46697384-run-httpd\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.481527 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/683ed23f-00ab-4e12-b807-d08d46697384-log-httpd\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.492016 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.492747 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-config-data\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.493068 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.500333 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-scripts\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.529998 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.532512 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlw8n\" (UniqueName: \"kubernetes.io/projected/683ed23f-00ab-4e12-b807-d08d46697384-kube-api-access-wlw8n\") pod \"ceilometer-0\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.635678 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 28 19:19:56 crc kubenswrapper[4767]: I0128 19:19:56.810960 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6740b200-cccc-45bb-84c7-a524cf79fcff" path="/var/lib/kubelet/pods/6740b200-cccc-45bb-84c7-a524cf79fcff/volumes" Jan 28 19:19:57 crc kubenswrapper[4767]: I0128 19:19:57.648549 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 19:19:57 crc kubenswrapper[4767]: I0128 19:19:57.941559 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"683ed23f-00ab-4e12-b807-d08d46697384","Type":"ContainerStarted","Data":"b9f9cf25c2b7581dd2025906864b49cc1607ec98b199621015dd97a5e7b211b6"} Jan 28 19:19:57 crc kubenswrapper[4767]: I0128 19:19:57.946052 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"1b4fbadd-8296-4437-9419-d0c94991808b","Type":"ContainerStarted","Data":"35e53601bb80176b5b7e29a394eeb42f83fe5f43b3111cffdf13ef836e36b58c"} Jan 28 19:19:58 crc kubenswrapper[4767]: I0128 19:19:58.433830 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 19:19:58 crc kubenswrapper[4767]: I0128 19:19:58.796367 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:19:58 crc kubenswrapper[4767]: E0128 19:19:58.796779 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:19:58 crc kubenswrapper[4767]: I0128 19:19:58.961893 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"683ed23f-00ab-4e12-b807-d08d46697384","Type":"ContainerStarted","Data":"1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956"} Jan 28 19:19:59 crc kubenswrapper[4767]: I0128 19:19:59.975829 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"683ed23f-00ab-4e12-b807-d08d46697384","Type":"ContainerStarted","Data":"ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018"} Jan 28 19:19:59 crc kubenswrapper[4767]: I0128 19:19:59.981924 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"1b4fbadd-8296-4437-9419-d0c94991808b","Type":"ContainerStarted","Data":"ba93ca1eef5716b4c71ee92e0c9a3a948f9e590ea7b30a7ad593167f4f4a4408"} Jan 28 19:19:59 crc kubenswrapper[4767]: I0128 19:19:59.982264 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-api" containerID="cri-o://48893babeb8d2fb2415c7fed1650433fb26b8f8414c16398df40b0e5fd7f4041" gracePeriod=30 Jan 28 19:19:59 crc kubenswrapper[4767]: I0128 19:19:59.982273 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-evaluator" containerID="cri-o://bb1d31e891fbaccd45ced725b7ec1afbb0d8dad4b7abcebf2f5687f7b4e121dd" gracePeriod=30 Jan 28 19:19:59 crc kubenswrapper[4767]: I0128 19:19:59.982293 4767 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openstack/aodh-0" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-listener" containerID="cri-o://ba93ca1eef5716b4c71ee92e0c9a3a948f9e590ea7b30a7ad593167f4f4a4408" gracePeriod=30 Jan 28 19:19:59 crc kubenswrapper[4767]: I0128 19:19:59.982318 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-notifier" containerID="cri-o://35e53601bb80176b5b7e29a394eeb42f83fe5f43b3111cffdf13ef836e36b58c" gracePeriod=30 Jan 28 19:20:00 crc kubenswrapper[4767]: I0128 19:20:00.993446 4767 generic.go:334] "Generic (PLEG): container finished" podID="1b4fbadd-8296-4437-9419-d0c94991808b" containerID="35e53601bb80176b5b7e29a394eeb42f83fe5f43b3111cffdf13ef836e36b58c" exitCode=0 Jan 28 19:20:00 crc kubenswrapper[4767]: I0128 19:20:00.993853 4767 generic.go:334] "Generic (PLEG): container finished" podID="1b4fbadd-8296-4437-9419-d0c94991808b" containerID="bb1d31e891fbaccd45ced725b7ec1afbb0d8dad4b7abcebf2f5687f7b4e121dd" exitCode=0 Jan 28 19:20:00 crc kubenswrapper[4767]: I0128 19:20:00.993869 4767 generic.go:334] "Generic (PLEG): container finished" podID="1b4fbadd-8296-4437-9419-d0c94991808b" containerID="48893babeb8d2fb2415c7fed1650433fb26b8f8414c16398df40b0e5fd7f4041" exitCode=0 Jan 28 19:20:00 crc kubenswrapper[4767]: I0128 19:20:00.993922 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"1b4fbadd-8296-4437-9419-d0c94991808b","Type":"ContainerDied","Data":"35e53601bb80176b5b7e29a394eeb42f83fe5f43b3111cffdf13ef836e36b58c"} Jan 28 19:20:00 crc kubenswrapper[4767]: I0128 19:20:00.993955 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"1b4fbadd-8296-4437-9419-d0c94991808b","Type":"ContainerDied","Data":"bb1d31e891fbaccd45ced725b7ec1afbb0d8dad4b7abcebf2f5687f7b4e121dd"} Jan 28 19:20:00 crc kubenswrapper[4767]: I0128 19:20:00.993968 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"1b4fbadd-8296-4437-9419-d0c94991808b","Type":"ContainerDied","Data":"48893babeb8d2fb2415c7fed1650433fb26b8f8414c16398df40b0e5fd7f4041"} Jan 28 19:20:00 crc kubenswrapper[4767]: I0128 19:20:00.995852 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"683ed23f-00ab-4e12-b807-d08d46697384","Type":"ContainerStarted","Data":"0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf"} Jan 28 19:20:04 crc kubenswrapper[4767]: I0128 19:20:04.028416 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"683ed23f-00ab-4e12-b807-d08d46697384","Type":"ContainerStarted","Data":"451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b"} Jan 28 19:20:04 crc kubenswrapper[4767]: I0128 19:20:04.029573 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="ceilometer-central-agent" containerID="cri-o://1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956" gracePeriod=30 Jan 28 19:20:04 crc kubenswrapper[4767]: I0128 19:20:04.030007 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 19:20:04 crc kubenswrapper[4767]: I0128 19:20:04.030462 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="proxy-httpd" 
containerID="cri-o://451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b" gracePeriod=30 Jan 28 19:20:04 crc kubenswrapper[4767]: I0128 19:20:04.030530 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="sg-core" containerID="cri-o://0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf" gracePeriod=30 Jan 28 19:20:04 crc kubenswrapper[4767]: I0128 19:20:04.030574 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="ceilometer-notification-agent" containerID="cri-o://ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018" gracePeriod=30 Jan 28 19:20:04 crc kubenswrapper[4767]: I0128 19:20:04.084075 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=7.090279087 podStartE2EDuration="14.084038975s" podCreationTimestamp="2026-01-28 19:19:50 +0000 UTC" firstStartedPulling="2026-01-28 19:19:51.864913414 +0000 UTC m=+2997.829096298" lastFinishedPulling="2026-01-28 19:19:58.858673302 +0000 UTC m=+3004.822856186" observedRunningTime="2026-01-28 19:20:00.019456768 +0000 UTC m=+3005.983639652" watchObservedRunningTime="2026-01-28 19:20:04.084038975 +0000 UTC m=+3010.048221849" Jan 28 19:20:04 crc kubenswrapper[4767]: I0128 19:20:04.084384 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.915781741 podStartE2EDuration="8.084374056s" podCreationTimestamp="2026-01-28 19:19:56 +0000 UTC" firstStartedPulling="2026-01-28 19:19:57.648832889 +0000 UTC m=+3003.613015763" lastFinishedPulling="2026-01-28 19:20:02.817425204 +0000 UTC m=+3008.781608078" observedRunningTime="2026-01-28 19:20:04.062642841 +0000 UTC m=+3010.026825725" watchObservedRunningTime="2026-01-28 19:20:04.084374056 +0000 UTC m=+3010.048556950" Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.042023 4767 generic.go:334] "Generic (PLEG): container finished" podID="683ed23f-00ab-4e12-b807-d08d46697384" containerID="451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b" exitCode=0 Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.042290 4767 generic.go:334] "Generic (PLEG): container finished" podID="683ed23f-00ab-4e12-b807-d08d46697384" containerID="0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf" exitCode=2 Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.042302 4767 generic.go:334] "Generic (PLEG): container finished" podID="683ed23f-00ab-4e12-b807-d08d46697384" containerID="ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018" exitCode=0 Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.042065 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"683ed23f-00ab-4e12-b807-d08d46697384","Type":"ContainerDied","Data":"451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b"} Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.042335 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"683ed23f-00ab-4e12-b807-d08d46697384","Type":"ContainerDied","Data":"0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf"} Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.042345 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"683ed23f-00ab-4e12-b807-d08d46697384","Type":"ContainerDied","Data":"ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018"} Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.770971 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.874922 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-ceilometer-tls-certs\") pod \"683ed23f-00ab-4e12-b807-d08d46697384\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.875018 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-sg-core-conf-yaml\") pod \"683ed23f-00ab-4e12-b807-d08d46697384\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.875091 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/683ed23f-00ab-4e12-b807-d08d46697384-log-httpd\") pod \"683ed23f-00ab-4e12-b807-d08d46697384\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.875274 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-combined-ca-bundle\") pod \"683ed23f-00ab-4e12-b807-d08d46697384\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.875363 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/683ed23f-00ab-4e12-b807-d08d46697384-run-httpd\") pod \"683ed23f-00ab-4e12-b807-d08d46697384\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.875421 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-config-data\") pod \"683ed23f-00ab-4e12-b807-d08d46697384\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.875549 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-scripts\") pod \"683ed23f-00ab-4e12-b807-d08d46697384\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.875621 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlw8n\" (UniqueName: \"kubernetes.io/projected/683ed23f-00ab-4e12-b807-d08d46697384-kube-api-access-wlw8n\") pod \"683ed23f-00ab-4e12-b807-d08d46697384\" (UID: \"683ed23f-00ab-4e12-b807-d08d46697384\") " Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.891056 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/683ed23f-00ab-4e12-b807-d08d46697384-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "683ed23f-00ab-4e12-b807-d08d46697384" (UID: "683ed23f-00ab-4e12-b807-d08d46697384"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.894288 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/683ed23f-00ab-4e12-b807-d08d46697384-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "683ed23f-00ab-4e12-b807-d08d46697384" (UID: "683ed23f-00ab-4e12-b807-d08d46697384"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.894762 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/683ed23f-00ab-4e12-b807-d08d46697384-kube-api-access-wlw8n" (OuterVolumeSpecName: "kube-api-access-wlw8n") pod "683ed23f-00ab-4e12-b807-d08d46697384" (UID: "683ed23f-00ab-4e12-b807-d08d46697384"). InnerVolumeSpecName "kube-api-access-wlw8n". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.898416 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-scripts" (OuterVolumeSpecName: "scripts") pod "683ed23f-00ab-4e12-b807-d08d46697384" (UID: "683ed23f-00ab-4e12-b807-d08d46697384"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.967475 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "683ed23f-00ab-4e12-b807-d08d46697384" (UID: "683ed23f-00ab-4e12-b807-d08d46697384"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.984063 4767 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.984133 4767 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/683ed23f-00ab-4e12-b807-d08d46697384-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.984147 4767 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/683ed23f-00ab-4e12-b807-d08d46697384-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.984159 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 19:20:05 crc kubenswrapper[4767]: I0128 19:20:05.984175 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlw8n\" (UniqueName: \"kubernetes.io/projected/683ed23f-00ab-4e12-b807-d08d46697384-kube-api-access-wlw8n\") on node \"crc\" DevicePath \"\"" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.065588 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "683ed23f-00ab-4e12-b807-d08d46697384" (UID: "683ed23f-00ab-4e12-b807-d08d46697384"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.070948 4767 generic.go:334] "Generic (PLEG): container finished" podID="683ed23f-00ab-4e12-b807-d08d46697384" containerID="1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956" exitCode=0 Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.071139 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"683ed23f-00ab-4e12-b807-d08d46697384","Type":"ContainerDied","Data":"1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956"} Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.071260 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"683ed23f-00ab-4e12-b807-d08d46697384","Type":"ContainerDied","Data":"b9f9cf25c2b7581dd2025906864b49cc1607ec98b199621015dd97a5e7b211b6"} Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.071346 4767 scope.go:117] "RemoveContainer" containerID="451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.071637 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.086017 4767 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.118473 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "683ed23f-00ab-4e12-b807-d08d46697384" (UID: "683ed23f-00ab-4e12-b807-d08d46697384"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.142143 4767 scope.go:117] "RemoveContainer" containerID="0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.181048 4767 scope.go:117] "RemoveContainer" containerID="ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.188776 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.205220 4767 scope.go:117] "RemoveContainer" containerID="1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.228745 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-config-data" (OuterVolumeSpecName: "config-data") pod "683ed23f-00ab-4e12-b807-d08d46697384" (UID: "683ed23f-00ab-4e12-b807-d08d46697384"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.229188 4767 scope.go:117] "RemoveContainer" containerID="451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b" Jan 28 19:20:06 crc kubenswrapper[4767]: E0128 19:20:06.229783 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b\": container with ID starting with 451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b not found: ID does not exist" containerID="451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.229847 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b"} err="failed to get container status \"451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b\": rpc error: code = NotFound desc = could not find container \"451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b\": container with ID starting with 451a1f486fedbea8633cae22701c32af34e41565b1c72f843e0c20f7e6cd542b not found: ID does not exist" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.229905 4767 scope.go:117] "RemoveContainer" containerID="0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf" Jan 28 19:20:06 crc kubenswrapper[4767]: E0128 19:20:06.230282 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf\": container with ID starting with 0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf not found: ID does not exist" containerID="0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.230325 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf"} err="failed to get container status \"0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf\": rpc error: code = NotFound desc = could not find container \"0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf\": container with ID starting with 0fc006a5f001e80a705b2f3af48875119625398411f2ae6fbfb089abd18e92cf not found: ID does not exist" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.230359 4767 scope.go:117] "RemoveContainer" containerID="ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018" Jan 28 19:20:06 crc kubenswrapper[4767]: E0128 19:20:06.230630 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018\": container with ID starting with ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018 not found: ID does not exist" containerID="ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.230663 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018"} err="failed to get container status \"ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018\": rpc error: code = NotFound desc = could not 
find container \"ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018\": container with ID starting with ba9f520e0194ef58513b396e926a9ab8727d9103604b6b3262e6e1cb08f76018 not found: ID does not exist" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.230686 4767 scope.go:117] "RemoveContainer" containerID="1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956" Jan 28 19:20:06 crc kubenswrapper[4767]: E0128 19:20:06.231072 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956\": container with ID starting with 1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956 not found: ID does not exist" containerID="1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.231130 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956"} err="failed to get container status \"1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956\": rpc error: code = NotFound desc = could not find container \"1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956\": container with ID starting with 1d829c78580c2ce82795ed7a53693a430593c56f1a19c0e64a631ae8adc7e956 not found: ID does not exist" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.290519 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/683ed23f-00ab-4e12-b807-d08d46697384-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.465308 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.488387 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.512955 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 28 19:20:06 crc kubenswrapper[4767]: E0128 19:20:06.513729 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="ceilometer-central-agent" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.513754 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="ceilometer-central-agent" Jan 28 19:20:06 crc kubenswrapper[4767]: E0128 19:20:06.513776 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="sg-core" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.513784 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="sg-core" Jan 28 19:20:06 crc kubenswrapper[4767]: E0128 19:20:06.513812 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="ceilometer-notification-agent" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.513819 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="ceilometer-notification-agent" Jan 28 19:20:06 crc kubenswrapper[4767]: E0128 19:20:06.513827 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="683ed23f-00ab-4e12-b807-d08d46697384" 
containerName="proxy-httpd" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.513836 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="proxy-httpd" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.514092 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="ceilometer-central-agent" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.514112 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="ceilometer-notification-agent" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.514716 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="sg-core" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.514749 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="683ed23f-00ab-4e12-b807-d08d46697384" containerName="proxy-httpd" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.532271 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.538425 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.538558 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.539135 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.547173 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.598223 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.598301 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-scripts\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.598345 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqr5q\" (UniqueName: \"kubernetes.io/projected/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-kube-api-access-pqr5q\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.598375 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.598393 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-run-httpd\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.598425 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-log-httpd\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.598477 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-config-data\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.598503 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.702420 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.702496 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-scripts\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.702554 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqr5q\" (UniqueName: \"kubernetes.io/projected/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-kube-api-access-pqr5q\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.702583 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.702618 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-run-httpd\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.702650 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-log-httpd\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.702727 4767 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-config-data\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.702757 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.703609 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-run-httpd\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.704089 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-log-httpd\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.710395 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-scripts\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.711307 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.715834 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.716459 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.722550 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-config-data\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.725330 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqr5q\" (UniqueName: \"kubernetes.io/projected/6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8-kube-api-access-pqr5q\") pod \"ceilometer-0\" (UID: \"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8\") " pod="openstack/ceilometer-0" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.808486 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="683ed23f-00ab-4e12-b807-d08d46697384" path="/var/lib/kubelet/pods/683ed23f-00ab-4e12-b807-d08d46697384/volumes" Jan 28 19:20:06 crc kubenswrapper[4767]: I0128 19:20:06.868609 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 28 19:20:07 crc kubenswrapper[4767]: I0128 19:20:07.381549 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 28 19:20:07 crc kubenswrapper[4767]: W0128 19:20:07.390287 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c0d6e5e_3e93_43a7_9211_7ab997c8c3e8.slice/crio-ef4c1b6585f4274cf06d94b981168ca5f756da7918a0900161fa4b083b14239b WatchSource:0}: Error finding container ef4c1b6585f4274cf06d94b981168ca5f756da7918a0900161fa4b083b14239b: Status 404 returned error can't find the container with id ef4c1b6585f4274cf06d94b981168ca5f756da7918a0900161fa4b083b14239b Jan 28 19:20:08 crc kubenswrapper[4767]: I0128 19:20:08.113323 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8","Type":"ContainerStarted","Data":"ef4c1b6585f4274cf06d94b981168ca5f756da7918a0900161fa4b083b14239b"} Jan 28 19:20:09 crc kubenswrapper[4767]: I0128 19:20:09.125374 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8","Type":"ContainerStarted","Data":"12b9dc2603848ae455264363cef3cc672c0b37cf598eca6f8b7f8eb61e047171"} Jan 28 19:20:09 crc kubenswrapper[4767]: I0128 19:20:09.125766 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8","Type":"ContainerStarted","Data":"3f49e43098798ddae19871b5d35a641f3029ba4aedc505aa312160f14e75a295"} Jan 28 19:20:10 crc kubenswrapper[4767]: I0128 19:20:10.139378 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8","Type":"ContainerStarted","Data":"7c4413e2f01e996622b2b26ac827002e7b7aa299c691b390ff7ee9848bc48d3c"} Jan 28 19:20:12 crc kubenswrapper[4767]: I0128 19:20:12.176543 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8","Type":"ContainerStarted","Data":"1767c8a252f2a97252f7386c623160308c9bb36382ad996d13540c149c7ecc82"} Jan 28 19:20:12 crc kubenswrapper[4767]: I0128 19:20:12.177487 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 28 19:20:12 crc kubenswrapper[4767]: I0128 19:20:12.221138 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.965966648 podStartE2EDuration="6.221106399s" podCreationTimestamp="2026-01-28 19:20:06 +0000 UTC" firstStartedPulling="2026-01-28 19:20:07.393958343 +0000 UTC m=+3013.358141217" lastFinishedPulling="2026-01-28 19:20:11.649098094 +0000 UTC m=+3017.613280968" observedRunningTime="2026-01-28 19:20:12.206943402 +0000 UTC m=+3018.171126336" watchObservedRunningTime="2026-01-28 19:20:12.221106399 +0000 UTC m=+3018.185289273" Jan 28 19:20:12 crc kubenswrapper[4767]: I0128 19:20:12.795960 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:20:12 crc kubenswrapper[4767]: E0128 19:20:12.796535 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:20:27 crc kubenswrapper[4767]: I0128 19:20:27.795866 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:20:27 crc kubenswrapper[4767]: E0128 19:20:27.797058 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.368026 4767 generic.go:334] "Generic (PLEG): container finished" podID="1b4fbadd-8296-4437-9419-d0c94991808b" containerID="ba93ca1eef5716b4c71ee92e0c9a3a948f9e590ea7b30a7ad593167f4f4a4408" exitCode=137 Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.368099 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"1b4fbadd-8296-4437-9419-d0c94991808b","Type":"ContainerDied","Data":"ba93ca1eef5716b4c71ee92e0c9a3a948f9e590ea7b30a7ad593167f4f4a4408"} Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.458495 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.553647 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cl7pz\" (UniqueName: \"kubernetes.io/projected/1b4fbadd-8296-4437-9419-d0c94991808b-kube-api-access-cl7pz\") pod \"1b4fbadd-8296-4437-9419-d0c94991808b\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.553725 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-combined-ca-bundle\") pod \"1b4fbadd-8296-4437-9419-d0c94991808b\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.553852 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-scripts\") pod \"1b4fbadd-8296-4437-9419-d0c94991808b\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.553954 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-config-data\") pod \"1b4fbadd-8296-4437-9419-d0c94991808b\" (UID: \"1b4fbadd-8296-4437-9419-d0c94991808b\") " Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.564735 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b4fbadd-8296-4437-9419-d0c94991808b-kube-api-access-cl7pz" (OuterVolumeSpecName: "kube-api-access-cl7pz") pod "1b4fbadd-8296-4437-9419-d0c94991808b" (UID: "1b4fbadd-8296-4437-9419-d0c94991808b"). 
InnerVolumeSpecName "kube-api-access-cl7pz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.565669 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-scripts" (OuterVolumeSpecName: "scripts") pod "1b4fbadd-8296-4437-9419-d0c94991808b" (UID: "1b4fbadd-8296-4437-9419-d0c94991808b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.657886 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cl7pz\" (UniqueName: \"kubernetes.io/projected/1b4fbadd-8296-4437-9419-d0c94991808b-kube-api-access-cl7pz\") on node \"crc\" DevicePath \"\"" Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.658004 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.685507 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b4fbadd-8296-4437-9419-d0c94991808b" (UID: "1b4fbadd-8296-4437-9419-d0c94991808b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.713100 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-config-data" (OuterVolumeSpecName: "config-data") pod "1b4fbadd-8296-4437-9419-d0c94991808b" (UID: "1b4fbadd-8296-4437-9419-d0c94991808b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.760057 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:20:30 crc kubenswrapper[4767]: I0128 19:20:30.760099 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1b4fbadd-8296-4437-9419-d0c94991808b-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.379728 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"1b4fbadd-8296-4437-9419-d0c94991808b","Type":"ContainerDied","Data":"b6e0cd2ad66b88987771c715ecb71bb427ea31d00b166e821bd4524080de1d8d"} Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.380643 4767 scope.go:117] "RemoveContainer" containerID="ba93ca1eef5716b4c71ee92e0c9a3a948f9e590ea7b30a7ad593167f4f4a4408" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.380011 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.410901 4767 scope.go:117] "RemoveContainer" containerID="35e53601bb80176b5b7e29a394eeb42f83fe5f43b3111cffdf13ef836e36b58c" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.418669 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.430987 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.440013 4767 scope.go:117] "RemoveContainer" containerID="bb1d31e891fbaccd45ced725b7ec1afbb0d8dad4b7abcebf2f5687f7b4e121dd" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.444735 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 28 19:20:31 crc kubenswrapper[4767]: E0128 19:20:31.445459 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-notifier" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.445553 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-notifier" Jan 28 19:20:31 crc kubenswrapper[4767]: E0128 19:20:31.445653 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-api" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.445733 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-api" Jan 28 19:20:31 crc kubenswrapper[4767]: E0128 19:20:31.445795 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-evaluator" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.445847 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-evaluator" Jan 28 19:20:31 crc kubenswrapper[4767]: E0128 19:20:31.445916 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-listener" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.445990 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-listener" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.446310 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-listener" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.446408 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-evaluator" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.446477 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-notifier" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.446535 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" containerName="aodh-api" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.449126 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.459997 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.460287 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.460661 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-chl48" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.461926 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.462422 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.469034 4767 scope.go:117] "RemoveContainer" containerID="48893babeb8d2fb2415c7fed1650433fb26b8f8414c16398df40b0e5fd7f4041" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.472509 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.476710 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-internal-tls-certs\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.476835 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-config-data\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.476896 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-public-tls-certs\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.476971 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8tmgf\" (UniqueName: \"kubernetes.io/projected/8e116b68-2fd9-44b4-9a15-356d5126fee0-kube-api-access-8tmgf\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.477022 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-combined-ca-bundle\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.477225 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-scripts\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.579194 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-scripts\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.579532 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-internal-tls-certs\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.579585 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-config-data\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.579623 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-public-tls-certs\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.579669 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8tmgf\" (UniqueName: \"kubernetes.io/projected/8e116b68-2fd9-44b4-9a15-356d5126fee0-kube-api-access-8tmgf\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.579701 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-combined-ca-bundle\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.584120 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-scripts\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.585551 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-combined-ca-bundle\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.591099 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-internal-tls-certs\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.591528 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-config-data\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.591944 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-public-tls-certs\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " 
pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.599951 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8tmgf\" (UniqueName: \"kubernetes.io/projected/8e116b68-2fd9-44b4-9a15-356d5126fee0-kube-api-access-8tmgf\") pod \"aodh-0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " pod="openstack/aodh-0" Jan 28 19:20:31 crc kubenswrapper[4767]: I0128 19:20:31.779384 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 19:20:32 crc kubenswrapper[4767]: I0128 19:20:32.263611 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 28 19:20:32 crc kubenswrapper[4767]: I0128 19:20:32.392711 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e116b68-2fd9-44b4-9a15-356d5126fee0","Type":"ContainerStarted","Data":"31d838722803d77810d6de9195bfd627d4be16b3b612ab279d10a0ca06ad63d1"} Jan 28 19:20:32 crc kubenswrapper[4767]: I0128 19:20:32.807683 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b4fbadd-8296-4437-9419-d0c94991808b" path="/var/lib/kubelet/pods/1b4fbadd-8296-4437-9419-d0c94991808b/volumes" Jan 28 19:20:33 crc kubenswrapper[4767]: I0128 19:20:33.406805 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e116b68-2fd9-44b4-9a15-356d5126fee0","Type":"ContainerStarted","Data":"d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89"} Jan 28 19:20:34 crc kubenswrapper[4767]: I0128 19:20:34.423459 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e116b68-2fd9-44b4-9a15-356d5126fee0","Type":"ContainerStarted","Data":"624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac"} Jan 28 19:20:35 crc kubenswrapper[4767]: I0128 19:20:35.441029 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e116b68-2fd9-44b4-9a15-356d5126fee0","Type":"ContainerStarted","Data":"c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61"} Jan 28 19:20:35 crc kubenswrapper[4767]: I0128 19:20:35.442616 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e116b68-2fd9-44b4-9a15-356d5126fee0","Type":"ContainerStarted","Data":"f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07"} Jan 28 19:20:35 crc kubenswrapper[4767]: I0128 19:20:35.481009 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=1.722561796 podStartE2EDuration="4.480979875s" podCreationTimestamp="2026-01-28 19:20:31 +0000 UTC" firstStartedPulling="2026-01-28 19:20:32.275036591 +0000 UTC m=+3038.239219465" lastFinishedPulling="2026-01-28 19:20:35.03345467 +0000 UTC m=+3040.997637544" observedRunningTime="2026-01-28 19:20:35.467840362 +0000 UTC m=+3041.432023236" watchObservedRunningTime="2026-01-28 19:20:35.480979875 +0000 UTC m=+3041.445162759" Jan 28 19:20:36 crc kubenswrapper[4767]: I0128 19:20:36.879525 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 28 19:20:40 crc kubenswrapper[4767]: I0128 19:20:40.796463 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:20:40 crc kubenswrapper[4767]: E0128 19:20:40.797354 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:20:51 crc kubenswrapper[4767]: I0128 19:20:51.795162 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:20:51 crc kubenswrapper[4767]: E0128 19:20:51.796115 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:21:02 crc kubenswrapper[4767]: I0128 19:21:02.796688 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:21:02 crc kubenswrapper[4767]: E0128 19:21:02.797926 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.113303 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-jrcmp"] Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.117442 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.138622 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrcmp"] Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.308246 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-catalog-content\") pod \"redhat-marketplace-jrcmp\" (UID: \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\") " pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.308756 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-utilities\") pod \"redhat-marketplace-jrcmp\" (UID: \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\") " pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.308899 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4ljb\" (UniqueName: \"kubernetes.io/projected/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-kube-api-access-z4ljb\") pod \"redhat-marketplace-jrcmp\" (UID: \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\") " pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.411026 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-catalog-content\") pod \"redhat-marketplace-jrcmp\" (UID: \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\") " pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.411607 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-utilities\") pod \"redhat-marketplace-jrcmp\" (UID: \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\") " pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.411705 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4ljb\" (UniqueName: \"kubernetes.io/projected/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-kube-api-access-z4ljb\") pod \"redhat-marketplace-jrcmp\" (UID: \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\") " pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.412311 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-utilities\") pod \"redhat-marketplace-jrcmp\" (UID: \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\") " pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.412217 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-catalog-content\") pod \"redhat-marketplace-jrcmp\" (UID: \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\") " pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.435908 4767 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-z4ljb\" (UniqueName: \"kubernetes.io/projected/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-kube-api-access-z4ljb\") pod \"redhat-marketplace-jrcmp\" (UID: \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\") " pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:13 crc kubenswrapper[4767]: I0128 19:21:13.460311 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:14 crc kubenswrapper[4767]: I0128 19:21:14.016345 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrcmp"] Jan 28 19:21:14 crc kubenswrapper[4767]: I0128 19:21:14.835242 4767 generic.go:334] "Generic (PLEG): container finished" podID="bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" containerID="9004d24b1f4788df5707e1b3e3f7ebca1c5ce4822f971098fa8962af4bb853c7" exitCode=0 Jan 28 19:21:14 crc kubenswrapper[4767]: I0128 19:21:14.835385 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrcmp" event={"ID":"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc","Type":"ContainerDied","Data":"9004d24b1f4788df5707e1b3e3f7ebca1c5ce4822f971098fa8962af4bb853c7"} Jan 28 19:21:14 crc kubenswrapper[4767]: I0128 19:21:14.835688 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrcmp" event={"ID":"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc","Type":"ContainerStarted","Data":"822bf95ef965d1b969581eef49002dc885af56ce8941da0a78d51d0702fad311"} Jan 28 19:21:15 crc kubenswrapper[4767]: I0128 19:21:15.796828 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:21:15 crc kubenswrapper[4767]: E0128 19:21:15.797549 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:21:16 crc kubenswrapper[4767]: I0128 19:21:16.857424 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrcmp" event={"ID":"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc","Type":"ContainerStarted","Data":"e25b4192040c13aecf6e6702500cc5f4cc5ac3416a0e6638ed2debba1f6c21ba"} Jan 28 19:21:17 crc kubenswrapper[4767]: I0128 19:21:17.869990 4767 generic.go:334] "Generic (PLEG): container finished" podID="bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" containerID="e25b4192040c13aecf6e6702500cc5f4cc5ac3416a0e6638ed2debba1f6c21ba" exitCode=0 Jan 28 19:21:17 crc kubenswrapper[4767]: I0128 19:21:17.870538 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrcmp" event={"ID":"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc","Type":"ContainerDied","Data":"e25b4192040c13aecf6e6702500cc5f4cc5ac3416a0e6638ed2debba1f6c21ba"} Jan 28 19:21:19 crc kubenswrapper[4767]: I0128 19:21:19.896337 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrcmp" event={"ID":"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc","Type":"ContainerStarted","Data":"dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892"} Jan 28 19:21:19 crc kubenswrapper[4767]: I0128 19:21:19.923190 4767 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openshift-marketplace/redhat-marketplace-jrcmp" podStartSLOduration=2.996518378 podStartE2EDuration="6.923160739s" podCreationTimestamp="2026-01-28 19:21:13 +0000 UTC" firstStartedPulling="2026-01-28 19:21:14.839332061 +0000 UTC m=+3080.803514935" lastFinishedPulling="2026-01-28 19:21:18.765974422 +0000 UTC m=+3084.730157296" observedRunningTime="2026-01-28 19:21:19.916586803 +0000 UTC m=+3085.880769697" watchObservedRunningTime="2026-01-28 19:21:19.923160739 +0000 UTC m=+3085.887343613" Jan 28 19:21:23 crc kubenswrapper[4767]: I0128 19:21:23.461139 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:23 crc kubenswrapper[4767]: I0128 19:21:23.461864 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:23 crc kubenswrapper[4767]: I0128 19:21:23.513950 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:28 crc kubenswrapper[4767]: I0128 19:21:28.795914 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:21:28 crc kubenswrapper[4767]: E0128 19:21:28.798708 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:21:33 crc kubenswrapper[4767]: I0128 19:21:33.516715 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:33 crc kubenswrapper[4767]: I0128 19:21:33.575378 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrcmp"] Jan 28 19:21:34 crc kubenswrapper[4767]: I0128 19:21:34.040428 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-jrcmp" podUID="bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" containerName="registry-server" containerID="cri-o://dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892" gracePeriod=2 Jan 28 19:21:34 crc kubenswrapper[4767]: I0128 19:21:34.610130 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:34 crc kubenswrapper[4767]: I0128 19:21:34.708539 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4ljb\" (UniqueName: \"kubernetes.io/projected/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-kube-api-access-z4ljb\") pod \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\" (UID: \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\") " Jan 28 19:21:34 crc kubenswrapper[4767]: I0128 19:21:34.708622 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-catalog-content\") pod \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\" (UID: \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\") " Jan 28 19:21:34 crc kubenswrapper[4767]: I0128 19:21:34.708679 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-utilities\") pod \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\" (UID: \"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc\") " Jan 28 19:21:34 crc kubenswrapper[4767]: I0128 19:21:34.710008 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-utilities" (OuterVolumeSpecName: "utilities") pod "bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" (UID: "bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:21:34 crc kubenswrapper[4767]: I0128 19:21:34.716313 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-kube-api-access-z4ljb" (OuterVolumeSpecName: "kube-api-access-z4ljb") pod "bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" (UID: "bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc"). InnerVolumeSpecName "kube-api-access-z4ljb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:21:34 crc kubenswrapper[4767]: I0128 19:21:34.730786 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" (UID: "bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:21:34 crc kubenswrapper[4767]: I0128 19:21:34.810940 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:21:34 crc kubenswrapper[4767]: I0128 19:21:34.811355 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4ljb\" (UniqueName: \"kubernetes.io/projected/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-kube-api-access-z4ljb\") on node \"crc\" DevicePath \"\"" Jan 28 19:21:34 crc kubenswrapper[4767]: I0128 19:21:34.811452 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:21:35 crc kubenswrapper[4767]: E0128 19:21:35.026271 4767 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbdd8c990_a47b_4535_abc8_3a4dc1ef1cbc.slice/crio-822bf95ef965d1b969581eef49002dc885af56ce8941da0a78d51d0702fad311\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbdd8c990_a47b_4535_abc8_3a4dc1ef1cbc.slice\": RecentStats: unable to find data in memory cache]" Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.053405 4767 generic.go:334] "Generic (PLEG): container finished" podID="bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" containerID="dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892" exitCode=0 Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.053479 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-jrcmp" Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.053487 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrcmp" event={"ID":"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc","Type":"ContainerDied","Data":"dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892"} Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.053585 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-jrcmp" event={"ID":"bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc","Type":"ContainerDied","Data":"822bf95ef965d1b969581eef49002dc885af56ce8941da0a78d51d0702fad311"} Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.053620 4767 scope.go:117] "RemoveContainer" containerID="dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892" Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.085091 4767 scope.go:117] "RemoveContainer" containerID="e25b4192040c13aecf6e6702500cc5f4cc5ac3416a0e6638ed2debba1f6c21ba" Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.085632 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrcmp"] Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.097531 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-jrcmp"] Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.121166 4767 scope.go:117] "RemoveContainer" containerID="9004d24b1f4788df5707e1b3e3f7ebca1c5ce4822f971098fa8962af4bb853c7" Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.159234 4767 scope.go:117] "RemoveContainer" containerID="dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892" Jan 28 19:21:35 crc kubenswrapper[4767]: E0128 19:21:35.159905 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892\": container with ID starting with dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892 not found: ID does not exist" containerID="dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892" Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.159981 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892"} err="failed to get container status \"dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892\": rpc error: code = NotFound desc = could not find container \"dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892\": container with ID starting with dbe8f4682ad85a7d1124798dde40162e1e819e926c5fa8790ea38dc05f504892 not found: ID does not exist" Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.160049 4767 scope.go:117] "RemoveContainer" containerID="e25b4192040c13aecf6e6702500cc5f4cc5ac3416a0e6638ed2debba1f6c21ba" Jan 28 19:21:35 crc kubenswrapper[4767]: E0128 19:21:35.160549 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e25b4192040c13aecf6e6702500cc5f4cc5ac3416a0e6638ed2debba1f6c21ba\": container with ID starting with e25b4192040c13aecf6e6702500cc5f4cc5ac3416a0e6638ed2debba1f6c21ba not found: ID does not exist" containerID="e25b4192040c13aecf6e6702500cc5f4cc5ac3416a0e6638ed2debba1f6c21ba" Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.160588 4767 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e25b4192040c13aecf6e6702500cc5f4cc5ac3416a0e6638ed2debba1f6c21ba"} err="failed to get container status \"e25b4192040c13aecf6e6702500cc5f4cc5ac3416a0e6638ed2debba1f6c21ba\": rpc error: code = NotFound desc = could not find container \"e25b4192040c13aecf6e6702500cc5f4cc5ac3416a0e6638ed2debba1f6c21ba\": container with ID starting with e25b4192040c13aecf6e6702500cc5f4cc5ac3416a0e6638ed2debba1f6c21ba not found: ID does not exist" Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.160606 4767 scope.go:117] "RemoveContainer" containerID="9004d24b1f4788df5707e1b3e3f7ebca1c5ce4822f971098fa8962af4bb853c7" Jan 28 19:21:35 crc kubenswrapper[4767]: E0128 19:21:35.160946 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9004d24b1f4788df5707e1b3e3f7ebca1c5ce4822f971098fa8962af4bb853c7\": container with ID starting with 9004d24b1f4788df5707e1b3e3f7ebca1c5ce4822f971098fa8962af4bb853c7 not found: ID does not exist" containerID="9004d24b1f4788df5707e1b3e3f7ebca1c5ce4822f971098fa8962af4bb853c7" Jan 28 19:21:35 crc kubenswrapper[4767]: I0128 19:21:35.160987 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9004d24b1f4788df5707e1b3e3f7ebca1c5ce4822f971098fa8962af4bb853c7"} err="failed to get container status \"9004d24b1f4788df5707e1b3e3f7ebca1c5ce4822f971098fa8962af4bb853c7\": rpc error: code = NotFound desc = could not find container \"9004d24b1f4788df5707e1b3e3f7ebca1c5ce4822f971098fa8962af4bb853c7\": container with ID starting with 9004d24b1f4788df5707e1b3e3f7ebca1c5ce4822f971098fa8962af4bb853c7 not found: ID does not exist" Jan 28 19:21:36 crc kubenswrapper[4767]: I0128 19:21:36.327438 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-66c6598f9f-9w6r9" podUID="85ceb5d8-a7fe-4e66-a20f-6a309942c1fc" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Jan 28 19:21:36 crc kubenswrapper[4767]: I0128 19:21:36.815593 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" path="/var/lib/kubelet/pods/bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc/volumes" Jan 28 19:21:40 crc kubenswrapper[4767]: I0128 19:21:40.796501 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:21:40 crc kubenswrapper[4767]: E0128 19:21:40.797503 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:21:52 crc kubenswrapper[4767]: I0128 19:21:52.795932 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:21:52 crc kubenswrapper[4767]: E0128 19:21:52.796735 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:22:03 crc kubenswrapper[4767]: I0128 19:22:03.795966 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:22:03 crc kubenswrapper[4767]: E0128 19:22:03.797017 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:22:17 crc kubenswrapper[4767]: I0128 19:22:17.795760 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:22:17 crc kubenswrapper[4767]: E0128 19:22:17.797119 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:22:32 crc kubenswrapper[4767]: I0128 19:22:32.796578 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:22:32 crc kubenswrapper[4767]: E0128 19:22:32.799098 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:22:45 crc kubenswrapper[4767]: I0128 19:22:45.796117 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:22:45 crc kubenswrapper[4767]: E0128 19:22:45.797168 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:22:57 crc kubenswrapper[4767]: I0128 19:22:57.796500 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:22:57 crc kubenswrapper[4767]: E0128 19:22:57.797195 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:23:12 crc kubenswrapper[4767]: I0128 19:23:12.795860 4767 
scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:23:12 crc kubenswrapper[4767]: E0128 19:23:12.796674 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:23:24 crc kubenswrapper[4767]: I0128 19:23:24.803596 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:23:24 crc kubenswrapper[4767]: E0128 19:23:24.809296 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:23:30 crc kubenswrapper[4767]: I0128 19:23:30.914477 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-79f6f75b9c-dhf5c_0d7c50d3-1348-43e5-a8fa-f05cd53d2a42/manager/0.log" Jan 28 19:23:38 crc kubenswrapper[4767]: I0128 19:23:38.796408 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:23:38 crc kubenswrapper[4767]: E0128 19:23:38.797867 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:23:44 crc kubenswrapper[4767]: I0128 19:23:44.722043 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j"] Jan 28 19:23:44 crc kubenswrapper[4767]: E0128 19:23:44.723182 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" containerName="extract-content" Jan 28 19:23:44 crc kubenswrapper[4767]: I0128 19:23:44.723221 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" containerName="extract-content" Jan 28 19:23:44 crc kubenswrapper[4767]: E0128 19:23:44.723238 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" containerName="extract-utilities" Jan 28 19:23:44 crc kubenswrapper[4767]: I0128 19:23:44.723247 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" containerName="extract-utilities" Jan 28 19:23:44 crc kubenswrapper[4767]: E0128 19:23:44.723263 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" containerName="registry-server" Jan 28 19:23:44 crc kubenswrapper[4767]: I0128 19:23:44.723271 4767 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" containerName="registry-server" Jan 28 19:23:44 crc kubenswrapper[4767]: I0128 19:23:44.723503 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="bdd8c990-a47b-4535-abc8-3a4dc1ef1cbc" containerName="registry-server" Jan 28 19:23:44 crc kubenswrapper[4767]: I0128 19:23:44.729074 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:44 crc kubenswrapper[4767]: I0128 19:23:44.731508 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 28 19:23:44 crc kubenswrapper[4767]: I0128 19:23:44.735702 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j"] Jan 28 19:23:44 crc kubenswrapper[4767]: I0128 19:23:44.908216 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e8dad657-0de1-4111-912b-f07ad63a264f-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j\" (UID: \"e8dad657-0de1-4111-912b-f07ad63a264f\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:44 crc kubenswrapper[4767]: I0128 19:23:44.908300 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e8dad657-0de1-4111-912b-f07ad63a264f-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j\" (UID: \"e8dad657-0de1-4111-912b-f07ad63a264f\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:44 crc kubenswrapper[4767]: I0128 19:23:44.909011 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tmmvg\" (UniqueName: \"kubernetes.io/projected/e8dad657-0de1-4111-912b-f07ad63a264f-kube-api-access-tmmvg\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j\" (UID: \"e8dad657-0de1-4111-912b-f07ad63a264f\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:45 crc kubenswrapper[4767]: I0128 19:23:45.010914 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tmmvg\" (UniqueName: \"kubernetes.io/projected/e8dad657-0de1-4111-912b-f07ad63a264f-kube-api-access-tmmvg\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j\" (UID: \"e8dad657-0de1-4111-912b-f07ad63a264f\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:45 crc kubenswrapper[4767]: I0128 19:23:45.011057 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e8dad657-0de1-4111-912b-f07ad63a264f-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j\" (UID: \"e8dad657-0de1-4111-912b-f07ad63a264f\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:45 crc kubenswrapper[4767]: I0128 19:23:45.011104 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e8dad657-0de1-4111-912b-f07ad63a264f-util\") pod 
\"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j\" (UID: \"e8dad657-0de1-4111-912b-f07ad63a264f\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:45 crc kubenswrapper[4767]: I0128 19:23:45.011767 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e8dad657-0de1-4111-912b-f07ad63a264f-util\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j\" (UID: \"e8dad657-0de1-4111-912b-f07ad63a264f\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:45 crc kubenswrapper[4767]: I0128 19:23:45.012000 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e8dad657-0de1-4111-912b-f07ad63a264f-bundle\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j\" (UID: \"e8dad657-0de1-4111-912b-f07ad63a264f\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:45 crc kubenswrapper[4767]: I0128 19:23:45.031163 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tmmvg\" (UniqueName: \"kubernetes.io/projected/e8dad657-0de1-4111-912b-f07ad63a264f-kube-api-access-tmmvg\") pod \"98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j\" (UID: \"e8dad657-0de1-4111-912b-f07ad63a264f\") " pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:45 crc kubenswrapper[4767]: I0128 19:23:45.065447 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:45 crc kubenswrapper[4767]: I0128 19:23:45.532502 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j"] Jan 28 19:23:45 crc kubenswrapper[4767]: W0128 19:23:45.534640 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode8dad657_0de1_4111_912b_f07ad63a264f.slice/crio-f4b9c293ae8a5d7970640bd670ae5818f73e5eafd925570debb49f14b97014fc WatchSource:0}: Error finding container f4b9c293ae8a5d7970640bd670ae5818f73e5eafd925570debb49f14b97014fc: Status 404 returned error can't find the container with id f4b9c293ae8a5d7970640bd670ae5818f73e5eafd925570debb49f14b97014fc Jan 28 19:23:45 crc kubenswrapper[4767]: I0128 19:23:45.616856 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" event={"ID":"e8dad657-0de1-4111-912b-f07ad63a264f","Type":"ContainerStarted","Data":"f4b9c293ae8a5d7970640bd670ae5818f73e5eafd925570debb49f14b97014fc"} Jan 28 19:23:46 crc kubenswrapper[4767]: I0128 19:23:46.627307 4767 generic.go:334] "Generic (PLEG): container finished" podID="e8dad657-0de1-4111-912b-f07ad63a264f" containerID="baa95c364f032242f8d9da33fad2489721a4f69a3e050005b0b72b8d942785a8" exitCode=0 Jan 28 19:23:46 crc kubenswrapper[4767]: I0128 19:23:46.627358 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" event={"ID":"e8dad657-0de1-4111-912b-f07ad63a264f","Type":"ContainerDied","Data":"baa95c364f032242f8d9da33fad2489721a4f69a3e050005b0b72b8d942785a8"} Jan 28 19:23:46 crc 
kubenswrapper[4767]: I0128 19:23:46.630505 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 19:23:48 crc kubenswrapper[4767]: I0128 19:23:48.651976 4767 generic.go:334] "Generic (PLEG): container finished" podID="e8dad657-0de1-4111-912b-f07ad63a264f" containerID="5e1eed177edc9917f591cd1115979b1ba9cf9e7dc611fe7953443f7ace989ec6" exitCode=0 Jan 28 19:23:48 crc kubenswrapper[4767]: I0128 19:23:48.652242 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" event={"ID":"e8dad657-0de1-4111-912b-f07ad63a264f","Type":"ContainerDied","Data":"5e1eed177edc9917f591cd1115979b1ba9cf9e7dc611fe7953443f7ace989ec6"} Jan 28 19:23:49 crc kubenswrapper[4767]: I0128 19:23:49.666265 4767 generic.go:334] "Generic (PLEG): container finished" podID="e8dad657-0de1-4111-912b-f07ad63a264f" containerID="ad98eb2f6b622d9a95e55330cf7dc0984e7bf6ec93aae7a63d3360d663e49f7d" exitCode=0 Jan 28 19:23:49 crc kubenswrapper[4767]: I0128 19:23:49.666369 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" event={"ID":"e8dad657-0de1-4111-912b-f07ad63a264f","Type":"ContainerDied","Data":"ad98eb2f6b622d9a95e55330cf7dc0984e7bf6ec93aae7a63d3360d663e49f7d"} Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.037440 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.169504 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e8dad657-0de1-4111-912b-f07ad63a264f-util\") pod \"e8dad657-0de1-4111-912b-f07ad63a264f\" (UID: \"e8dad657-0de1-4111-912b-f07ad63a264f\") " Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.169653 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e8dad657-0de1-4111-912b-f07ad63a264f-bundle\") pod \"e8dad657-0de1-4111-912b-f07ad63a264f\" (UID: \"e8dad657-0de1-4111-912b-f07ad63a264f\") " Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.169711 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tmmvg\" (UniqueName: \"kubernetes.io/projected/e8dad657-0de1-4111-912b-f07ad63a264f-kube-api-access-tmmvg\") pod \"e8dad657-0de1-4111-912b-f07ad63a264f\" (UID: \"e8dad657-0de1-4111-912b-f07ad63a264f\") " Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.174007 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8dad657-0de1-4111-912b-f07ad63a264f-bundle" (OuterVolumeSpecName: "bundle") pod "e8dad657-0de1-4111-912b-f07ad63a264f" (UID: "e8dad657-0de1-4111-912b-f07ad63a264f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.182519 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8dad657-0de1-4111-912b-f07ad63a264f-util" (OuterVolumeSpecName: "util") pod "e8dad657-0de1-4111-912b-f07ad63a264f" (UID: "e8dad657-0de1-4111-912b-f07ad63a264f"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.183248 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8dad657-0de1-4111-912b-f07ad63a264f-kube-api-access-tmmvg" (OuterVolumeSpecName: "kube-api-access-tmmvg") pod "e8dad657-0de1-4111-912b-f07ad63a264f" (UID: "e8dad657-0de1-4111-912b-f07ad63a264f"). InnerVolumeSpecName "kube-api-access-tmmvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.272704 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tmmvg\" (UniqueName: \"kubernetes.io/projected/e8dad657-0de1-4111-912b-f07ad63a264f-kube-api-access-tmmvg\") on node \"crc\" DevicePath \"\"" Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.272747 4767 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/e8dad657-0de1-4111-912b-f07ad63a264f-util\") on node \"crc\" DevicePath \"\"" Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.272759 4767 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/e8dad657-0de1-4111-912b-f07ad63a264f-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.691899 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" event={"ID":"e8dad657-0de1-4111-912b-f07ad63a264f","Type":"ContainerDied","Data":"f4b9c293ae8a5d7970640bd670ae5818f73e5eafd925570debb49f14b97014fc"} Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.692326 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4b9c293ae8a5d7970640bd670ae5818f73e5eafd925570debb49f14b97014fc" Jan 28 19:23:51 crc kubenswrapper[4767]: I0128 19:23:51.692407 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j" Jan 28 19:23:52 crc kubenswrapper[4767]: I0128 19:23:52.796087 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:23:52 crc kubenswrapper[4767]: E0128 19:23:52.796526 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.199107 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-jcpks"] Jan 28 19:24:03 crc kubenswrapper[4767]: E0128 19:24:03.200760 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8dad657-0de1-4111-912b-f07ad63a264f" containerName="extract" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.200783 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8dad657-0de1-4111-912b-f07ad63a264f" containerName="extract" Jan 28 19:24:03 crc kubenswrapper[4767]: E0128 19:24:03.200812 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8dad657-0de1-4111-912b-f07ad63a264f" containerName="pull" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.200819 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8dad657-0de1-4111-912b-f07ad63a264f" containerName="pull" Jan 28 19:24:03 crc kubenswrapper[4767]: E0128 19:24:03.200834 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8dad657-0de1-4111-912b-f07ad63a264f" containerName="util" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.200842 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8dad657-0de1-4111-912b-f07ad63a264f" containerName="util" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.201114 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8dad657-0de1-4111-912b-f07ad63a264f" containerName="extract" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.202479 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jcpks" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.215415 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.215603 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-6pxmm" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.215752 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.221278 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-jcpks"] Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.229227 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzzvf\" (UniqueName: \"kubernetes.io/projected/471116e2-94a5-4d2f-bac3-0c312652ae8c-kube-api-access-wzzvf\") pod \"obo-prometheus-operator-68bc856cb9-jcpks\" (UID: \"471116e2-94a5-4d2f-bac3-0c312652ae8c\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jcpks" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.317960 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl"] Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.320446 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.326984 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.327467 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-97vqx" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.332921 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzzvf\" (UniqueName: \"kubernetes.io/projected/471116e2-94a5-4d2f-bac3-0c312652ae8c-kube-api-access-wzzvf\") pod \"obo-prometheus-operator-68bc856cb9-jcpks\" (UID: \"471116e2-94a5-4d2f-bac3-0c312652ae8c\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jcpks" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.333088 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8f08ca15-124c-476c-b9e1-1002aa7edfd7-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl\" (UID: \"8f08ca15-124c-476c-b9e1-1002aa7edfd7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.333190 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8f08ca15-124c-476c-b9e1-1002aa7edfd7-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl\" (UID: \"8f08ca15-124c-476c-b9e1-1002aa7edfd7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" Jan 28 19:24:03 crc 
kubenswrapper[4767]: I0128 19:24:03.347822 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl"] Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.365220 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzzvf\" (UniqueName: \"kubernetes.io/projected/471116e2-94a5-4d2f-bac3-0c312652ae8c-kube-api-access-wzzvf\") pod \"obo-prometheus-operator-68bc856cb9-jcpks\" (UID: \"471116e2-94a5-4d2f-bac3-0c312652ae8c\") " pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jcpks" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.385672 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2"] Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.387716 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.419375 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2"] Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.436762 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8f08ca15-124c-476c-b9e1-1002aa7edfd7-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl\" (UID: \"8f08ca15-124c-476c-b9e1-1002aa7edfd7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.436972 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c2ee930f-1338-483a-aa28-eaecde4404cb-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2\" (UID: \"c2ee930f-1338-483a-aa28-eaecde4404cb\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.437020 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c2ee930f-1338-483a-aa28-eaecde4404cb-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2\" (UID: \"c2ee930f-1338-483a-aa28-eaecde4404cb\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.438132 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8f08ca15-124c-476c-b9e1-1002aa7edfd7-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl\" (UID: \"8f08ca15-124c-476c-b9e1-1002aa7edfd7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.444036 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/8f08ca15-124c-476c-b9e1-1002aa7edfd7-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl\" (UID: \"8f08ca15-124c-476c-b9e1-1002aa7edfd7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" Jan 28 
19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.448487 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/8f08ca15-124c-476c-b9e1-1002aa7edfd7-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl\" (UID: \"8f08ca15-124c-476c-b9e1-1002aa7edfd7\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.511823 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-2kxrn"] Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.513849 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.528037 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-tq5r8" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.528497 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.540999 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf-observability-operator-tls\") pod \"observability-operator-59bdc8b94-2kxrn\" (UID: \"e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf\") " pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.541249 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c2ee930f-1338-483a-aa28-eaecde4404cb-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2\" (UID: \"c2ee930f-1338-483a-aa28-eaecde4404cb\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.541298 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c2ee930f-1338-483a-aa28-eaecde4404cb-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2\" (UID: \"c2ee930f-1338-483a-aa28-eaecde4404cb\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.541367 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqcbm\" (UniqueName: \"kubernetes.io/projected/e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf-kube-api-access-qqcbm\") pod \"observability-operator-59bdc8b94-2kxrn\" (UID: \"e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf\") " pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.542996 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jcpks" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.563957 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/c2ee930f-1338-483a-aa28-eaecde4404cb-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2\" (UID: \"c2ee930f-1338-483a-aa28-eaecde4404cb\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.566369 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-2kxrn"] Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.582522 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/c2ee930f-1338-483a-aa28-eaecde4404cb-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2\" (UID: \"c2ee930f-1338-483a-aa28-eaecde4404cb\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.643231 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf-observability-operator-tls\") pod \"observability-operator-59bdc8b94-2kxrn\" (UID: \"e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf\") " pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.643991 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqcbm\" (UniqueName: \"kubernetes.io/projected/e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf-kube-api-access-qqcbm\") pod \"observability-operator-59bdc8b94-2kxrn\" (UID: \"e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf\") " pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.650697 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf-observability-operator-tls\") pod \"observability-operator-59bdc8b94-2kxrn\" (UID: \"e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf\") " pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.675308 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqcbm\" (UniqueName: \"kubernetes.io/projected/e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf-kube-api-access-qqcbm\") pod \"observability-operator-59bdc8b94-2kxrn\" (UID: \"e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf\") " pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.680915 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.686915 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-pkkhl"] Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.688518 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.705574 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-wgwfm" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.751174 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gpvnv\" (UniqueName: \"kubernetes.io/projected/3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97-kube-api-access-gpvnv\") pod \"perses-operator-5bf474d74f-pkkhl\" (UID: \"3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97\") " pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.751817 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97-openshift-service-ca\") pod \"perses-operator-5bf474d74f-pkkhl\" (UID: \"3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97\") " pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.760196 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-pkkhl"] Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.879836 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97-openshift-service-ca\") pod \"perses-operator-5bf474d74f-pkkhl\" (UID: \"3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97\") " pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.880481 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gpvnv\" (UniqueName: \"kubernetes.io/projected/3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97-kube-api-access-gpvnv\") pod \"perses-operator-5bf474d74f-pkkhl\" (UID: \"3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97\") " pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.885628 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.887672 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97-openshift-service-ca\") pod \"perses-operator-5bf474d74f-pkkhl\" (UID: \"3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97\") " pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.919573 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gpvnv\" (UniqueName: \"kubernetes.io/projected/3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97-kube-api-access-gpvnv\") pod \"perses-operator-5bf474d74f-pkkhl\" (UID: \"3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97\") " pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" Jan 28 19:24:03 crc kubenswrapper[4767]: I0128 19:24:03.969235 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" Jan 28 19:24:04 crc kubenswrapper[4767]: I0128 19:24:04.028927 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" Jan 28 19:24:04 crc kubenswrapper[4767]: I0128 19:24:04.193123 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-68bc856cb9-jcpks"] Jan 28 19:24:04 crc kubenswrapper[4767]: I0128 19:24:04.453561 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl"] Jan 28 19:24:04 crc kubenswrapper[4767]: I0128 19:24:04.628301 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2"] Jan 28 19:24:04 crc kubenswrapper[4767]: I0128 19:24:04.808581 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:24:04 crc kubenswrapper[4767]: E0128 19:24:04.808947 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:24:04 crc kubenswrapper[4767]: I0128 19:24:04.882511 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5bf474d74f-pkkhl"] Jan 28 19:24:04 crc kubenswrapper[4767]: I0128 19:24:04.955480 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" event={"ID":"c2ee930f-1338-483a-aa28-eaecde4404cb","Type":"ContainerStarted","Data":"fd50a50c20d2a4026891467166463bd536bbe7e90509e11a02583f279686916e"} Jan 28 19:24:04 crc kubenswrapper[4767]: I0128 19:24:04.969502 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" event={"ID":"3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97","Type":"ContainerStarted","Data":"bb4905e7b73d1ab6211966859ea8bc5dad4d757ec46778c24e98adad13df0c0a"} Jan 28 19:24:04 crc kubenswrapper[4767]: I0128 19:24:04.973913 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jcpks" event={"ID":"471116e2-94a5-4d2f-bac3-0c312652ae8c","Type":"ContainerStarted","Data":"132634a2a2a2c9fd035c52c0eaf49aee458aea9146fbf324db85b284ed31dc95"} Jan 28 19:24:04 crc kubenswrapper[4767]: I0128 19:24:04.975528 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" event={"ID":"8f08ca15-124c-476c-b9e1-1002aa7edfd7","Type":"ContainerStarted","Data":"9011005220abc15c00b50eaae90f3e675952580d0451fea962993ee9520f629c"} Jan 28 19:24:05 crc kubenswrapper[4767]: I0128 19:24:05.007924 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-59bdc8b94-2kxrn"] Jan 28 19:24:06 crc kubenswrapper[4767]: I0128 19:24:06.015294 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" event={"ID":"e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf","Type":"ContainerStarted","Data":"cdb509758b68fb4eb6fef96af63da457a69e2ec846ee0e2a9eb1309efa82b669"} Jan 28 19:24:19 crc kubenswrapper[4767]: I0128 19:24:19.255980 4767 scope.go:117] "RemoveContainer" 
containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:24:19 crc kubenswrapper[4767]: E0128 19:24:19.257199 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:24:19 crc kubenswrapper[4767]: I0128 19:24:19.313016 4767 trace.go:236] Trace[906717549]: "Calculate volume metrics of catalog-content for pod openshift-marketplace/certified-operators-qf5wb" (28-Jan-2026 19:24:18.171) (total time: 1141ms): Jan 28 19:24:19 crc kubenswrapper[4767]: Trace[906717549]: [1.141641579s] [1.141641579s] END Jan 28 19:24:21 crc kubenswrapper[4767]: E0128 19:24:21.411468 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:42ebc3571195d8c41fd01b8d08e98fe2cc12c1caabea251aecb4442d8eade4ea" Jan 28 19:24:21 crc kubenswrapper[4767]: E0128 19:24:21.412211 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator-admission-webhook,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:42ebc3571195d8c41fd01b8d08e98fe2cc12c1caabea251aecb4442d8eade4ea,Command:[],Args:[--web.enable-tls=true --web.cert-file=/tmp/k8s-webhook-server/serving-certs/tls.crt --web.key-file=/tmp/k8s-webhook-server/serving-certs/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{209715200 0} {} BinarySI},},Requests:ResourceList{cpu: {{50 -3} {} 50m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:apiservice-cert,ReadOnly:false,MountPath:/apiserver.local.config/certificates,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl_openshift-operators(8f08ca15-124c-476c-b9e1-1002aa7edfd7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: 
context canceled" logger="UnhandledError" Jan 28 19:24:21 crc kubenswrapper[4767]: E0128 19:24:21.413423 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" podUID="8f08ca15-124c-476c-b9e1-1002aa7edfd7" Jan 28 19:24:21 crc kubenswrapper[4767]: E0128 19:24:21.462566 4767 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:42ebc3571195d8c41fd01b8d08e98fe2cc12c1caabea251aecb4442d8eade4ea" Jan 28 19:24:21 crc kubenswrapper[4767]: E0128 19:24:21.462805 4767 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator-admission-webhook,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:42ebc3571195d8c41fd01b8d08e98fe2cc12c1caabea251aecb4442d8eade4ea,Command:[],Args:[--web.enable-tls=true --web.cert-file=/tmp/k8s-webhook-server/serving-certs/tls.crt --web.key-file=/tmp/k8s-webhook-server/serving-certs/tls.key],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:https,HostPort:0,ContainerPort:8443,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.1,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{209715200 0} {} BinarySI},},Requests:ResourceList{cpu: {{50 -3} {} 50m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:apiservice-cert,ReadOnly:false,MountPath:/apiserver.local.config/certificates,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2_openshift-operators(c2ee930f-1338-483a-aa28-eaecde4404cb): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 28 19:24:21 crc kubenswrapper[4767]: E0128 19:24:21.464105 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" 
pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" podUID="c2ee930f-1338-483a-aa28-eaecde4404cb" Jan 28 19:24:22 crc kubenswrapper[4767]: I0128 19:24:22.321652 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" event={"ID":"3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97","Type":"ContainerStarted","Data":"cfa52b9d0249098c6b1a9d79ed29290c50e26fa7cd8968beaad5fb058a861cc7"} Jan 28 19:24:22 crc kubenswrapper[4767]: I0128 19:24:22.322270 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" Jan 28 19:24:22 crc kubenswrapper[4767]: I0128 19:24:22.324899 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jcpks" event={"ID":"471116e2-94a5-4d2f-bac3-0c312652ae8c","Type":"ContainerStarted","Data":"af999d15b03f0066e830aaa14240aa1c54ffa2d67742c2a60a7f8e9bb8636cff"} Jan 28 19:24:22 crc kubenswrapper[4767]: I0128 19:24:22.332585 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" event={"ID":"e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf","Type":"ContainerStarted","Data":"6d166db90e244f2b8af5d8097160b9be40ba62b9e5cbe763efea04c95a23c51a"} Jan 28 19:24:22 crc kubenswrapper[4767]: I0128 19:24:22.332910 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" Jan 28 19:24:22 crc kubenswrapper[4767]: E0128 19:24:22.335745 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:42ebc3571195d8c41fd01b8d08e98fe2cc12c1caabea251aecb4442d8eade4ea\\\"\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" podUID="c2ee930f-1338-483a-aa28-eaecde4404cb" Jan 28 19:24:22 crc kubenswrapper[4767]: E0128 19:24:22.337964 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator-admission-webhook\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-admission-webhook-rhel9@sha256:42ebc3571195d8c41fd01b8d08e98fe2cc12c1caabea251aecb4442d8eade4ea\\\"\"" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" podUID="8f08ca15-124c-476c-b9e1-1002aa7edfd7" Jan 28 19:24:22 crc kubenswrapper[4767]: I0128 19:24:22.349306 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" Jan 28 19:24:22 crc kubenswrapper[4767]: I0128 19:24:22.356698 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" podStartSLOduration=2.768005786 podStartE2EDuration="19.356671489s" podCreationTimestamp="2026-01-28 19:24:03 +0000 UTC" firstStartedPulling="2026-01-28 19:24:04.881298389 +0000 UTC m=+3250.845481263" lastFinishedPulling="2026-01-28 19:24:21.469964092 +0000 UTC m=+3267.434146966" observedRunningTime="2026-01-28 19:24:22.340711687 +0000 UTC m=+3268.304894571" watchObservedRunningTime="2026-01-28 19:24:22.356671489 +0000 UTC m=+3268.320854363" Jan 28 19:24:22 crc kubenswrapper[4767]: I0128 19:24:22.383656 4767 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-68bc856cb9-jcpks" podStartSLOduration=2.102344456 podStartE2EDuration="19.383632626s" podCreationTimestamp="2026-01-28 19:24:03 +0000 UTC" firstStartedPulling="2026-01-28 19:24:04.208158584 +0000 UTC m=+3250.172341458" lastFinishedPulling="2026-01-28 19:24:21.489446744 +0000 UTC m=+3267.453629628" observedRunningTime="2026-01-28 19:24:22.369801671 +0000 UTC m=+3268.333984545" watchObservedRunningTime="2026-01-28 19:24:22.383632626 +0000 UTC m=+3268.347815500" Jan 28 19:24:22 crc kubenswrapper[4767]: I0128 19:24:22.498633 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-59bdc8b94-2kxrn" podStartSLOduration=3.030725573 podStartE2EDuration="19.498608899s" podCreationTimestamp="2026-01-28 19:24:03 +0000 UTC" firstStartedPulling="2026-01-28 19:24:05.033053319 +0000 UTC m=+3250.997236193" lastFinishedPulling="2026-01-28 19:24:21.500936645 +0000 UTC m=+3267.465119519" observedRunningTime="2026-01-28 19:24:22.447653797 +0000 UTC m=+3268.411836671" watchObservedRunningTime="2026-01-28 19:24:22.498608899 +0000 UTC m=+3268.462791773" Jan 28 19:24:29 crc kubenswrapper[4767]: I0128 19:24:29.647310 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 28 19:24:29 crc kubenswrapper[4767]: I0128 19:24:29.649141 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-api" containerID="cri-o://d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89" gracePeriod=30 Jan 28 19:24:29 crc kubenswrapper[4767]: I0128 19:24:29.649501 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-listener" containerID="cri-o://c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61" gracePeriod=30 Jan 28 19:24:29 crc kubenswrapper[4767]: I0128 19:24:29.649645 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-notifier" containerID="cri-o://f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07" gracePeriod=30 Jan 28 19:24:29 crc kubenswrapper[4767]: I0128 19:24:29.649818 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-evaluator" containerID="cri-o://624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac" gracePeriod=30 Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.310583 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.313607 4767 util.go:30] "No sandbox for pod can be found. 
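
The pod_startup_latency_tracker records above are internally consistent: podStartSLOduration appears to equal podStartE2EDuration minus the image-pull window (lastFinishedPulling − firstStartedPulling), computed on the kubelet's monotonic clock (the m=+... offsets). A small Go check against the perses-operator numbers; the formula is inferred from the logged values, not taken from kubelet source.

package main

import "fmt"

func main() {
	// Monotonic offsets (m=+..., in seconds) from the perses-operator record.
	const (
		e2e       = 19.356671489   // podStartE2EDuration
		pullStart = 3250.845481263 // firstStartedPulling
		pullEnd   = 3267.434146966 // lastFinishedPulling
	)
	slo := e2e - (pullEnd - pullStart)
	// Prints 2.768005786, matching podStartSLOduration in the log:
	// the SLO figure excludes time spent pulling the image.
	fmt.Printf("podStartSLOduration = %.9f s\n", slo)
}

The obo-prometheus-operator record checks out the same way: 19.383632626 − (3267.453629628 − 3250.172341458) ≈ 2.102344456.
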
Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.318081 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-web-config" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.321009 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-tls-assets-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.321138 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-generated" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.321640 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-alertmanager-dockercfg-2vtrm" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.321797 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"alertmanager-metric-storage-cluster-tls-config" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.354122 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.423481 4767 generic.go:334] "Generic (PLEG): container finished" podID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerID="624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac" exitCode=0 Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.423522 4767 generic.go:334] "Generic (PLEG): container finished" podID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerID="d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89" exitCode=0 Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.423544 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e116b68-2fd9-44b4-9a15-356d5126fee0","Type":"ContainerDied","Data":"624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac"} Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.423579 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e116b68-2fd9-44b4-9a15-356d5126fee0","Type":"ContainerDied","Data":"d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89"} Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.519161 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.519263 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.519356 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.519385 4767 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.519419 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.519462 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.519503 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2k6jk\" (UniqueName: \"kubernetes.io/projected/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-kube-api-access-2k6jk\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.621353 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.621734 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.621868 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.622003 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.622143 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2k6jk\" (UniqueName: \"kubernetes.io/projected/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-kube-api-access-2k6jk\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") 
" pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.622297 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.622447 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.623533 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"alertmanager-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-alertmanager-metric-storage-db\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.629308 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-tls-assets\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.629383 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-config-out\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.630027 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/secret/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-config-volume\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.637002 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cluster-tls-config\" (UniqueName: \"kubernetes.io/secret/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-cluster-tls-config\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.637562 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-web-config\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.642029 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2k6jk\" (UniqueName: \"kubernetes.io/projected/24ff0e29-e1a3-43b3-8fc1-1d9e46fae486-kube-api-access-2k6jk\") pod \"alertmanager-metric-storage-0\" (UID: \"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486\") " pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: 
I0128 19:24:30.655331 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.886856 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.899386 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.904020 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.904281 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.904543 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.904690 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.904721 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.904867 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.904983 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-b9jgx" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.907432 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 28 19:24:30 crc kubenswrapper[4767]: I0128 19:24:30.907634 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.031963 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.032528 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.032550 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.032844 4767 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.033126 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.033318 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fc528c55-e822-485c-a824-9a4c15689009-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.033449 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-config\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.033558 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-42r4c\" (UniqueName: \"kubernetes.io/projected/fc528c55-e822-485c-a824-9a4c15689009-kube-api-access-42r4c\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.033611 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.033673 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fc528c55-e822-485c-a824-9a4c15689009-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.138656 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-42r4c\" (UniqueName: \"kubernetes.io/projected/fc528c55-e822-485c-a824-9a4c15689009-kube-api-access-42r4c\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.138829 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 
28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.139467 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.139647 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fc528c55-e822-485c-a824-9a4c15689009-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.140119 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.140318 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.140353 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.140558 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.140657 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.140748 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fc528c55-e822-485c-a824-9a4c15689009-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.140828 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-config\") pod \"prometheus-metric-storage-0\" (UID: 
\"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.141080 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.141657 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.141761 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.146858 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.153029 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-config\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.153029 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.153374 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fc528c55-e822-485c-a824-9a4c15689009-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.169074 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fc528c55-e822-485c-a824-9a4c15689009-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.188942 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/alertmanager-metric-storage-0"] Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.204646 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-42r4c\" (UniqueName: \"kubernetes.io/projected/fc528c55-e822-485c-a824-9a4c15689009-kube-api-access-42r4c\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.238337 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.433681 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486","Type":"ContainerStarted","Data":"10a4f731fa068dfc421226309f945e645433b9f29d71ead1c8f64828c306adce"} Jan 28 19:24:31 crc kubenswrapper[4767]: I0128 19:24:31.529233 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 19:24:32 crc kubenswrapper[4767]: I0128 19:24:32.193160 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 19:24:32 crc kubenswrapper[4767]: I0128 19:24:32.454176 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc528c55-e822-485c-a824-9a4c15689009","Type":"ContainerStarted","Data":"490491512ead3ad3db50e6cb19cc3345ddaa7c478cddfb1c82a0e49b4983acb6"} Jan 28 19:24:32 crc kubenswrapper[4767]: I0128 19:24:32.461397 4767 generic.go:334] "Generic (PLEG): container finished" podID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerID="f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07" exitCode=0 Jan 28 19:24:32 crc kubenswrapper[4767]: I0128 19:24:32.461470 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e116b68-2fd9-44b4-9a15-356d5126fee0","Type":"ContainerDied","Data":"f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07"} Jan 28 19:24:32 crc kubenswrapper[4767]: I0128 19:24:32.796043 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:24:32 crc kubenswrapper[4767]: E0128 19:24:32.796476 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.291074 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/aodh-0" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.421770 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tmgf\" (UniqueName: \"kubernetes.io/projected/8e116b68-2fd9-44b4-9a15-356d5126fee0-kube-api-access-8tmgf\") pod \"8e116b68-2fd9-44b4-9a15-356d5126fee0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.421927 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-public-tls-certs\") pod \"8e116b68-2fd9-44b4-9a15-356d5126fee0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.422016 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-internal-tls-certs\") pod \"8e116b68-2fd9-44b4-9a15-356d5126fee0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.422131 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-config-data\") pod \"8e116b68-2fd9-44b4-9a15-356d5126fee0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.422374 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-combined-ca-bundle\") pod \"8e116b68-2fd9-44b4-9a15-356d5126fee0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.423368 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-scripts\") pod \"8e116b68-2fd9-44b4-9a15-356d5126fee0\" (UID: \"8e116b68-2fd9-44b4-9a15-356d5126fee0\") " Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.434834 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-scripts" (OuterVolumeSpecName: "scripts") pod "8e116b68-2fd9-44b4-9a15-356d5126fee0" (UID: "8e116b68-2fd9-44b4-9a15-356d5126fee0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.440784 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8e116b68-2fd9-44b4-9a15-356d5126fee0-kube-api-access-8tmgf" (OuterVolumeSpecName: "kube-api-access-8tmgf") pod "8e116b68-2fd9-44b4-9a15-356d5126fee0" (UID: "8e116b68-2fd9-44b4-9a15-356d5126fee0"). InnerVolumeSpecName "kube-api-access-8tmgf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.480006 4767 generic.go:334] "Generic (PLEG): container finished" podID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerID="c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61" exitCode=0 Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.480283 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e116b68-2fd9-44b4-9a15-356d5126fee0","Type":"ContainerDied","Data":"c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61"} Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.480336 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.480512 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"8e116b68-2fd9-44b4-9a15-356d5126fee0","Type":"ContainerDied","Data":"31d838722803d77810d6de9195bfd627d4be16b3b612ab279d10a0ca06ad63d1"} Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.480529 4767 scope.go:117] "RemoveContainer" containerID="c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.506725 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8e116b68-2fd9-44b4-9a15-356d5126fee0" (UID: "8e116b68-2fd9-44b4-9a15-356d5126fee0"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.510402 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8e116b68-2fd9-44b4-9a15-356d5126fee0" (UID: "8e116b68-2fd9-44b4-9a15-356d5126fee0"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.526445 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-scripts\") on node \"crc\" DevicePath \"\"" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.526486 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tmgf\" (UniqueName: \"kubernetes.io/projected/8e116b68-2fd9-44b4-9a15-356d5126fee0-kube-api-access-8tmgf\") on node \"crc\" DevicePath \"\"" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.526503 4767 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.526515 4767 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.582153 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-config-data" (OuterVolumeSpecName: "config-data") pod "8e116b68-2fd9-44b4-9a15-356d5126fee0" (UID: "8e116b68-2fd9-44b4-9a15-356d5126fee0"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.615189 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8e116b68-2fd9-44b4-9a15-356d5126fee0" (UID: "8e116b68-2fd9-44b4-9a15-356d5126fee0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.628467 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-config-data\") on node \"crc\" DevicePath \"\"" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.628524 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8e116b68-2fd9-44b4-9a15-356d5126fee0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.656318 4767 scope.go:117] "RemoveContainer" containerID="f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.682275 4767 scope.go:117] "RemoveContainer" containerID="624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.705460 4767 scope.go:117] "RemoveContainer" containerID="d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.857753 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.877690 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"] Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.890363 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"] Jan 28 19:24:33 crc kubenswrapper[4767]: E0128 19:24:33.891104 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-api" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.891142 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-api" Jan 28 19:24:33 crc kubenswrapper[4767]: E0128 19:24:33.891181 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-listener" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.891194 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-listener" Jan 28 19:24:33 crc kubenswrapper[4767]: E0128 19:24:33.891271 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-notifier" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.891283 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-notifier" Jan 28 19:24:33 crc kubenswrapper[4767]: E0128 19:24:33.891315 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-evaluator" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.891326 4767 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-evaluator" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.891608 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-evaluator" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.891646 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-notifier" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.891664 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-listener" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.891681 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" containerName="aodh-api" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.894477 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.902538 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.907007 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.907491 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-chl48" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.907694 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.907829 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data" Jan 28 19:24:33 crc kubenswrapper[4767]: I0128 19:24:33.908024 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.031866 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5bf474d74f-pkkhl" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.044892 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-config-data\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.045008 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-scripts\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.045117 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-public-tls-certs\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.045169 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-internal-tls-certs\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.045197 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd699\" (UniqueName: \"kubernetes.io/projected/0b821092-04ee-43e7-beba-8cd160857435-kube-api-access-sd699\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.045568 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-combined-ca-bundle\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.147784 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-public-tls-certs\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.147865 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-internal-tls-certs\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.147886 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd699\" (UniqueName: \"kubernetes.io/projected/0b821092-04ee-43e7-beba-8cd160857435-kube-api-access-sd699\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.147939 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-combined-ca-bundle\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.148066 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-config-data\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.148133 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-scripts\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.154578 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-internal-tls-certs\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.154802 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-combined-ca-bundle\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.157015 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-scripts\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.158110 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-public-tls-certs\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.160443 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-config-data\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.172964 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd699\" (UniqueName: \"kubernetes.io/projected/0b821092-04ee-43e7-beba-8cd160857435-kube-api-access-sd699\") pod \"aodh-0\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") " pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.283653 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.549743 4767 scope.go:117] "RemoveContainer" containerID="c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61" Jan 28 19:24:34 crc kubenswrapper[4767]: E0128 19:24:34.561871 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61\": container with ID starting with c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61 not found: ID does not exist" containerID="c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.561945 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61"} err="failed to get container status \"c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61\": rpc error: code = NotFound desc = could not find container \"c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61\": container with ID starting with c6bf8a2de014268209e919213c9de88a622c3f1634699ae79156fd06cb892e61 not found: ID does not exist" Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.561984 4767 scope.go:117] "RemoveContainer" containerID="f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07" Jan 28 19:24:34 crc kubenswrapper[4767]: E0128 19:24:34.562456 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07\": container with ID starting with f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07 not found: ID does not exist" containerID="f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07" Jan 
Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.562544 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07"} err="failed to get container status \"f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07\": rpc error: code = NotFound desc = could not find container \"f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07\": container with ID starting with f54450300c80c9ccb9ac25ab990c656008f7f9dcdbcacbc92c9f49b14708ff07 not found: ID does not exist"
Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.562608 4767 scope.go:117] "RemoveContainer" containerID="624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac"
Jan 28 19:24:34 crc kubenswrapper[4767]: E0128 19:24:34.564679 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac\": container with ID starting with 624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac not found: ID does not exist" containerID="624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac"
Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.564769 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac"} err="failed to get container status \"624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac\": rpc error: code = NotFound desc = could not find container \"624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac\": container with ID starting with 624ec832ce84ea8298020f400a8a082e0dfade2634803c4fc1c467d710540eac not found: ID does not exist"
Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.564821 4767 scope.go:117] "RemoveContainer" containerID="d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89"
Jan 28 19:24:34 crc kubenswrapper[4767]: E0128 19:24:34.571649 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89\": container with ID starting with d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89 not found: ID does not exist" containerID="d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89"
Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.571711 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89"} err="failed to get container status \"d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89\": rpc error: code = NotFound desc = could not find container \"d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89\": container with ID starting with d6e10a3d8097de28937ade80772d2a703498f0e878e077c54843b1015430ff89 not found: ID does not exist"
Jan 28 19:24:34 crc kubenswrapper[4767]: I0128 19:24:34.834337 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8e116b68-2fd9-44b4-9a15-356d5126fee0" path="/var/lib/kubelet/pods/8e116b68-2fd9-44b4-9a15-356d5126fee0/volumes"
Jan 28 19:24:35 crc kubenswrapper[4767]: I0128 19:24:35.241103 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Jan 28 19:24:35 crc kubenswrapper[4767]: I0128 19:24:35.511503 4767 kubelet.go:2453] "SyncLoop
(PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0b821092-04ee-43e7-beba-8cd160857435","Type":"ContainerStarted","Data":"debf48c9771816c8ae5c04d316f26d45601641ec28119744e7d46607b7d49e80"} Jan 28 19:24:36 crc kubenswrapper[4767]: I0128 19:24:36.523257 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" event={"ID":"c2ee930f-1338-483a-aa28-eaecde4404cb","Type":"ContainerStarted","Data":"610dd7a5ba398d4f186874ff9698c9f1535bd67aabc37d2deb93750a6dd5c0b5"} Jan 28 19:24:36 crc kubenswrapper[4767]: I0128 19:24:36.529470 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0b821092-04ee-43e7-beba-8cd160857435","Type":"ContainerStarted","Data":"529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda"} Jan 28 19:24:36 crc kubenswrapper[4767]: I0128 19:24:36.565821 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2" podStartSLOduration=2.362441819 podStartE2EDuration="33.565800198s" podCreationTimestamp="2026-01-28 19:24:03 +0000 UTC" firstStartedPulling="2026-01-28 19:24:04.639416547 +0000 UTC m=+3250.603599421" lastFinishedPulling="2026-01-28 19:24:35.842774926 +0000 UTC m=+3281.806957800" observedRunningTime="2026-01-28 19:24:36.556065162 +0000 UTC m=+3282.520248036" watchObservedRunningTime="2026-01-28 19:24:36.565800198 +0000 UTC m=+3282.529983072" Jan 28 19:24:39 crc kubenswrapper[4767]: I0128 19:24:39.566601 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0b821092-04ee-43e7-beba-8cd160857435","Type":"ContainerStarted","Data":"1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3"} Jan 28 19:24:39 crc kubenswrapper[4767]: I0128 19:24:39.569192 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" event={"ID":"8f08ca15-124c-476c-b9e1-1002aa7edfd7","Type":"ContainerStarted","Data":"17942bd5f9d0c079da6a30902f07ecbbeff867aba05f238a3a16b1eb5af099d6"} Jan 28 19:24:39 crc kubenswrapper[4767]: I0128 19:24:39.714630 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl" podStartSLOduration=-9223372000.140175 podStartE2EDuration="36.714601886s" podCreationTimestamp="2026-01-28 19:24:03 +0000 UTC" firstStartedPulling="2026-01-28 19:24:04.473300777 +0000 UTC m=+3250.437483641" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 19:24:39.608112569 +0000 UTC m=+3285.572295443" watchObservedRunningTime="2026-01-28 19:24:39.714601886 +0000 UTC m=+3285.678784760" Jan 28 19:24:40 crc kubenswrapper[4767]: I0128 19:24:40.587192 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc528c55-e822-485c-a824-9a4c15689009","Type":"ContainerStarted","Data":"3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f"} Jan 28 19:24:40 crc kubenswrapper[4767]: I0128 19:24:40.595221 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486","Type":"ContainerStarted","Data":"bd98fe149aadae872e236cf1b3bf06ce06fa8dec430f4ff19de8624d56d3718e"} Jan 28 19:24:40 crc kubenswrapper[4767]: I0128 19:24:40.611393 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/aodh-0" event={"ID":"0b821092-04ee-43e7-beba-8cd160857435","Type":"ContainerStarted","Data":"535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8"} Jan 28 19:24:41 crc kubenswrapper[4767]: I0128 19:24:41.634152 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0b821092-04ee-43e7-beba-8cd160857435","Type":"ContainerStarted","Data":"44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40"} Jan 28 19:24:41 crc kubenswrapper[4767]: I0128 19:24:41.659555 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=3.026191199 podStartE2EDuration="8.659522609s" podCreationTimestamp="2026-01-28 19:24:33 +0000 UTC" firstStartedPulling="2026-01-28 19:24:35.255362064 +0000 UTC m=+3281.219544938" lastFinishedPulling="2026-01-28 19:24:40.888693474 +0000 UTC m=+3286.852876348" observedRunningTime="2026-01-28 19:24:41.659420165 +0000 UTC m=+3287.623603049" watchObservedRunningTime="2026-01-28 19:24:41.659522609 +0000 UTC m=+3287.623705493" Jan 28 19:24:43 crc kubenswrapper[4767]: I0128 19:24:43.796979 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:24:43 crc kubenswrapper[4767]: E0128 19:24:43.797966 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:24:47 crc kubenswrapper[4767]: I0128 19:24:47.716644 4767 generic.go:334] "Generic (PLEG): container finished" podID="24ff0e29-e1a3-43b3-8fc1-1d9e46fae486" containerID="bd98fe149aadae872e236cf1b3bf06ce06fa8dec430f4ff19de8624d56d3718e" exitCode=0 Jan 28 19:24:47 crc kubenswrapper[4767]: I0128 19:24:47.716796 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486","Type":"ContainerDied","Data":"bd98fe149aadae872e236cf1b3bf06ce06fa8dec430f4ff19de8624d56d3718e"} Jan 28 19:24:48 crc kubenswrapper[4767]: I0128 19:24:48.733439 4767 generic.go:334] "Generic (PLEG): container finished" podID="fc528c55-e822-485c-a824-9a4c15689009" containerID="3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f" exitCode=0 Jan 28 19:24:48 crc kubenswrapper[4767]: I0128 19:24:48.733541 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc528c55-e822-485c-a824-9a4c15689009","Type":"ContainerDied","Data":"3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f"} Jan 28 19:24:51 crc kubenswrapper[4767]: I0128 19:24:51.790095 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486","Type":"ContainerStarted","Data":"d928f024e98215675403a46a87b0dc47e69b462cc6b8d6f3da4f482323ec0285"} Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.142269 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lm9nc"] Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.145048 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.159743 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lm9nc"] Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.294829 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56a7720b-a6a0-432a-bd24-eaab9a30780a-utilities\") pod \"certified-operators-lm9nc\" (UID: \"56a7720b-a6a0-432a-bd24-eaab9a30780a\") " pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.296158 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56a7720b-a6a0-432a-bd24-eaab9a30780a-catalog-content\") pod \"certified-operators-lm9nc\" (UID: \"56a7720b-a6a0-432a-bd24-eaab9a30780a\") " pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.296294 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pqs8\" (UniqueName: \"kubernetes.io/projected/56a7720b-a6a0-432a-bd24-eaab9a30780a-kube-api-access-7pqs8\") pod \"certified-operators-lm9nc\" (UID: \"56a7720b-a6a0-432a-bd24-eaab9a30780a\") " pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.399005 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56a7720b-a6a0-432a-bd24-eaab9a30780a-catalog-content\") pod \"certified-operators-lm9nc\" (UID: \"56a7720b-a6a0-432a-bd24-eaab9a30780a\") " pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.399067 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pqs8\" (UniqueName: \"kubernetes.io/projected/56a7720b-a6a0-432a-bd24-eaab9a30780a-kube-api-access-7pqs8\") pod \"certified-operators-lm9nc\" (UID: \"56a7720b-a6a0-432a-bd24-eaab9a30780a\") " pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.399146 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56a7720b-a6a0-432a-bd24-eaab9a30780a-utilities\") pod \"certified-operators-lm9nc\" (UID: \"56a7720b-a6a0-432a-bd24-eaab9a30780a\") " pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.399668 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56a7720b-a6a0-432a-bd24-eaab9a30780a-catalog-content\") pod \"certified-operators-lm9nc\" (UID: \"56a7720b-a6a0-432a-bd24-eaab9a30780a\") " pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.399952 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56a7720b-a6a0-432a-bd24-eaab9a30780a-utilities\") pod \"certified-operators-lm9nc\" (UID: \"56a7720b-a6a0-432a-bd24-eaab9a30780a\") " pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.421264 4767 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-7pqs8\" (UniqueName: \"kubernetes.io/projected/56a7720b-a6a0-432a-bd24-eaab9a30780a-kube-api-access-7pqs8\") pod \"certified-operators-lm9nc\" (UID: \"56a7720b-a6a0-432a-bd24-eaab9a30780a\") " pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:24:53 crc kubenswrapper[4767]: I0128 19:24:53.493631 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:24:55 crc kubenswrapper[4767]: I0128 19:24:55.855506 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/alertmanager-metric-storage-0" event={"ID":"24ff0e29-e1a3-43b3-8fc1-1d9e46fae486","Type":"ContainerStarted","Data":"aabffa286854584035879c899cd71efe736f640cd7f3440648ebef75850a6923"} Jan 28 19:24:55 crc kubenswrapper[4767]: I0128 19:24:55.856234 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:55 crc kubenswrapper[4767]: I0128 19:24:55.860971 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/alertmanager-metric-storage-0" Jan 28 19:24:55 crc kubenswrapper[4767]: I0128 19:24:55.890086 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/alertmanager-metric-storage-0" podStartSLOduration=6.481066535 podStartE2EDuration="25.890062212s" podCreationTimestamp="2026-01-28 19:24:30 +0000 UTC" firstStartedPulling="2026-01-28 19:24:31.186179602 +0000 UTC m=+3277.150362476" lastFinishedPulling="2026-01-28 19:24:50.595175279 +0000 UTC m=+3296.559358153" observedRunningTime="2026-01-28 19:24:55.879678985 +0000 UTC m=+3301.843861869" watchObservedRunningTime="2026-01-28 19:24:55.890062212 +0000 UTC m=+3301.854245086" Jan 28 19:24:56 crc kubenswrapper[4767]: I0128 19:24:56.269628 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lm9nc"] Jan 28 19:24:58 crc kubenswrapper[4767]: I0128 19:24:58.796297 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:24:58 crc kubenswrapper[4767]: W0128 19:24:58.959709 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56a7720b_a6a0_432a_bd24_eaab9a30780a.slice/crio-895c36e3a5d4ba16933bb18be77e08f86f3c1152e4e1a22c542b2f820053c45a WatchSource:0}: Error finding container 895c36e3a5d4ba16933bb18be77e08f86f3c1152e4e1a22c542b2f820053c45a: Status 404 returned error can't find the container with id 895c36e3a5d4ba16933bb18be77e08f86f3c1152e4e1a22c542b2f820053c45a Jan 28 19:24:59 crc kubenswrapper[4767]: I0128 19:24:59.904965 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc528c55-e822-485c-a824-9a4c15689009","Type":"ContainerStarted","Data":"8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8"} Jan 28 19:24:59 crc kubenswrapper[4767]: I0128 19:24:59.906986 4767 generic.go:334] "Generic (PLEG): container finished" podID="56a7720b-a6a0-432a-bd24-eaab9a30780a" containerID="1e71d965707c38e02c5628ee53fd488f7a9888be178276a04130a100777b1827" exitCode=0 Jan 28 19:24:59 crc kubenswrapper[4767]: I0128 19:24:59.907040 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm9nc" 
event={"ID":"56a7720b-a6a0-432a-bd24-eaab9a30780a","Type":"ContainerDied","Data":"1e71d965707c38e02c5628ee53fd488f7a9888be178276a04130a100777b1827"} Jan 28 19:24:59 crc kubenswrapper[4767]: I0128 19:24:59.907097 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm9nc" event={"ID":"56a7720b-a6a0-432a-bd24-eaab9a30780a","Type":"ContainerStarted","Data":"895c36e3a5d4ba16933bb18be77e08f86f3c1152e4e1a22c542b2f820053c45a"} Jan 28 19:24:59 crc kubenswrapper[4767]: I0128 19:24:59.914294 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"34ec71217a6b006a3a33c4eb869cd5a266f9db72104e528bff51b018d9e58d57"} Jan 28 19:25:01 crc kubenswrapper[4767]: I0128 19:25:01.938339 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm9nc" event={"ID":"56a7720b-a6a0-432a-bd24-eaab9a30780a","Type":"ContainerStarted","Data":"e60394328bc585d69d770b0480c3955fb6b1d3e020b01b6cb3a2da15a2ea1292"} Jan 28 19:25:04 crc kubenswrapper[4767]: I0128 19:25:04.973934 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc528c55-e822-485c-a824-9a4c15689009","Type":"ContainerStarted","Data":"bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db"} Jan 28 19:25:05 crc kubenswrapper[4767]: I0128 19:25:05.988949 4767 generic.go:334] "Generic (PLEG): container finished" podID="56a7720b-a6a0-432a-bd24-eaab9a30780a" containerID="e60394328bc585d69d770b0480c3955fb6b1d3e020b01b6cb3a2da15a2ea1292" exitCode=0 Jan 28 19:25:05 crc kubenswrapper[4767]: I0128 19:25:05.989050 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm9nc" event={"ID":"56a7720b-a6a0-432a-bd24-eaab9a30780a","Type":"ContainerDied","Data":"e60394328bc585d69d770b0480c3955fb6b1d3e020b01b6cb3a2da15a2ea1292"} Jan 28 19:25:08 crc kubenswrapper[4767]: I0128 19:25:08.017519 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm9nc" event={"ID":"56a7720b-a6a0-432a-bd24-eaab9a30780a","Type":"ContainerStarted","Data":"4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142"} Jan 28 19:25:08 crc kubenswrapper[4767]: I0128 19:25:08.054058 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lm9nc" podStartSLOduration=7.721703346 podStartE2EDuration="15.054029108s" podCreationTimestamp="2026-01-28 19:24:53 +0000 UTC" firstStartedPulling="2026-01-28 19:24:59.911057229 +0000 UTC m=+3305.875240103" lastFinishedPulling="2026-01-28 19:25:07.243382991 +0000 UTC m=+3313.207565865" observedRunningTime="2026-01-28 19:25:08.038013895 +0000 UTC m=+3314.002196779" watchObservedRunningTime="2026-01-28 19:25:08.054029108 +0000 UTC m=+3314.018211982" Jan 28 19:25:11 crc kubenswrapper[4767]: I0128 19:25:11.062771 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc528c55-e822-485c-a824-9a4c15689009","Type":"ContainerStarted","Data":"0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986"} Jan 28 19:25:11 crc kubenswrapper[4767]: I0128 19:25:11.113681 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=3.89291526 podStartE2EDuration="42.113649983s" 
podCreationTimestamp="2026-01-28 19:24:29 +0000 UTC" firstStartedPulling="2026-01-28 19:24:32.195654727 +0000 UTC m=+3278.159837601" lastFinishedPulling="2026-01-28 19:25:10.41638945 +0000 UTC m=+3316.380572324" observedRunningTime="2026-01-28 19:25:11.106280912 +0000 UTC m=+3317.070463806" watchObservedRunningTime="2026-01-28 19:25:11.113649983 +0000 UTC m=+3317.077832857" Jan 28 19:25:11 crc kubenswrapper[4767]: I0128 19:25:11.529503 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 28 19:25:13 crc kubenswrapper[4767]: I0128 19:25:13.494909 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:25:13 crc kubenswrapper[4767]: I0128 19:25:13.495547 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:25:14 crc kubenswrapper[4767]: I0128 19:25:14.555201 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-lm9nc" podUID="56a7720b-a6a0-432a-bd24-eaab9a30780a" containerName="registry-server" probeResult="failure" output=< Jan 28 19:25:14 crc kubenswrapper[4767]: timeout: failed to connect service ":50051" within 1s Jan 28 19:25:14 crc kubenswrapper[4767]: > Jan 28 19:25:16 crc kubenswrapper[4767]: I0128 19:25:16.529753 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 28 19:25:16 crc kubenswrapper[4767]: I0128 19:25:16.534350 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 28 19:25:17 crc kubenswrapper[4767]: I0128 19:25:17.126109 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 28 19:25:18 crc kubenswrapper[4767]: I0128 19:25:18.985332 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 28 19:25:18 crc kubenswrapper[4767]: I0128 19:25:18.985926 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="98b184ce-a502-4335-a4fb-1475658bed4b" containerName="openstackclient" containerID="cri-o://1b9ad0d125732d9a9b87a9cd1f51ffec115d3b7716227021b9f186c061bc5d96" gracePeriod=2 Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.004173 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.042768 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 28 19:25:19 crc kubenswrapper[4767]: E0128 19:25:19.043334 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98b184ce-a502-4335-a4fb-1475658bed4b" containerName="openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.043352 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="98b184ce-a502-4335-a4fb-1475658bed4b" containerName="openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.043588 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="98b184ce-a502-4335-a4fb-1475658bed4b" containerName="openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.044415 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.052840 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="98b184ce-a502-4335-a4fb-1475658bed4b" podUID="ff25a1a5-b0a1-4ee2-9d33-8ef6d9cdc337" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.068710 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.082598 4767 status_manager.go:875] "Failed to update status for pod" pod="openstack/openstackclient" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ff25a1a5-b0a1-4ee2-9d33-8ef6d9cdc337\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T19:25:19Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T19:25:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T19:25:19Z\\\",\\\"message\\\":\\\"containers with unready status: [openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-28T19:25:19Z\\\",\\\"message\\\":\\\"containers with unready status: [openstackclient]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"openstackclient\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/clouds.yaml\\\",\\\"name\\\":\\\"openstack-config\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/.config/openstack/secure.yaml\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/home/cloud-admin/cloudrc\\\",\\\"name\\\":\\\"openstack-config-secret\\\"},{\\\"mountPath\\\":\\\"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem\\\",\\\"name\\\":\\\"combined-ca-bundle\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppcq5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-28T19:25:19Z\\\"}}\" for pod \"openstack\"/\"openstackclient\": pods \"openstackclient\" not found" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.097506 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 28 19:25:19 crc kubenswrapper[4767]: E0128 19:25:19.098573 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[combined-ca-bundle kube-api-access-ppcq5 openstack-config openstack-config-secret], unattached volumes=[], failed to 
process volumes=[combined-ca-bundle kube-api-access-ppcq5 openstack-config openstack-config-secret]: context canceled" pod="openstack/openstackclient" podUID="ff25a1a5-b0a1-4ee2-9d33-8ef6d9cdc337" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.121734 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.134096 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.135740 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.140661 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="ff25a1a5-b0a1-4ee2-9d33-8ef6d9cdc337" podUID="50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.156435 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.162492 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.184331 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.186795 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="ff25a1a5-b0a1-4ee2-9d33-8ef6d9cdc337" podUID="50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.206911 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58-openstack-config\") pod \"openstackclient\" (UID: \"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58\") " pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.206981 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58-openstack-config-secret\") pod \"openstackclient\" (UID: \"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58\") " pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.207056 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58-combined-ca-bundle\") pod \"openstackclient\" (UID: \"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58\") " pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.207097 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcxtf\" (UniqueName: \"kubernetes.io/projected/50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58-kube-api-access-gcxtf\") pod \"openstackclient\" (UID: \"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58\") " pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.309323 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58-openstack-config\") pod 
\"openstackclient\" (UID: \"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58\") " pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.309791 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58-openstack-config-secret\") pod \"openstackclient\" (UID: \"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58\") " pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.309918 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58-combined-ca-bundle\") pod \"openstackclient\" (UID: \"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58\") " pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.309985 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcxtf\" (UniqueName: \"kubernetes.io/projected/50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58-kube-api-access-gcxtf\") pod \"openstackclient\" (UID: \"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58\") " pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.311188 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58-openstack-config\") pod \"openstackclient\" (UID: \"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58\") " pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.325749 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58-combined-ca-bundle\") pod \"openstackclient\" (UID: \"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58\") " pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.337915 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58-openstack-config-secret\") pod \"openstackclient\" (UID: \"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58\") " pod="openstack/openstackclient" Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.350587 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"] Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.350891 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-api" containerID="cri-o://529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda" gracePeriod=30 Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.354382 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-notifier" containerID="cri-o://535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8" gracePeriod=30 Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.354496 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-listener" containerID="cri-o://44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40" gracePeriod=30 Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.354572 4767 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-evaluator" containerID="cri-o://1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3" gracePeriod=30
Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.358052 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcxtf\" (UniqueName: \"kubernetes.io/projected/50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58-kube-api-access-gcxtf\") pod \"openstackclient\" (UID: \"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58\") " pod="openstack/openstackclient"
Jan 28 19:25:19 crc kubenswrapper[4767]: I0128 19:25:19.478942 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 28 19:25:20 crc kubenswrapper[4767]: I0128 19:25:20.169320 4767 generic.go:334] "Generic (PLEG): container finished" podID="0b821092-04ee-43e7-beba-8cd160857435" containerID="1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3" exitCode=0
Jan 28 19:25:20 crc kubenswrapper[4767]: I0128 19:25:20.170782 4767 generic.go:334] "Generic (PLEG): container finished" podID="0b821092-04ee-43e7-beba-8cd160857435" containerID="529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda" exitCode=0
Jan 28 19:25:20 crc kubenswrapper[4767]: I0128 19:25:20.169401 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0b821092-04ee-43e7-beba-8cd160857435","Type":"ContainerDied","Data":"1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3"}
Jan 28 19:25:20 crc kubenswrapper[4767]: I0128 19:25:20.170861 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 28 19:25:20 crc kubenswrapper[4767]: I0128 19:25:20.170867 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0b821092-04ee-43e7-beba-8cd160857435","Type":"ContainerDied","Data":"529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda"}
Jan 28 19:25:20 crc kubenswrapper[4767]: I0128 19:25:20.174694 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="ff25a1a5-b0a1-4ee2-9d33-8ef6d9cdc337" podUID="50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58"
Jan 28 19:25:20 crc kubenswrapper[4767]: I0128 19:25:20.186926 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="ff25a1a5-b0a1-4ee2-9d33-8ef6d9cdc337" podUID="50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58"
Jan 28 19:25:20 crc kubenswrapper[4767]: W0128 19:25:20.284854 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod50ab1c7e_8b1e_4bbe_9574_15b14f7d0a58.slice/crio-83d85696cc73d086a9602c36036aad80965751aee413b3590047bc611242862f WatchSource:0}: Error finding container 83d85696cc73d086a9602c36036aad80965751aee413b3590047bc611242862f: Status 404 returned error can't find the container with id 83d85696cc73d086a9602c36036aad80965751aee413b3590047bc611242862f
Jan 28 19:25:20 crc kubenswrapper[4767]: I0128 19:25:20.286650 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
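The gracePeriod values in these "Killing container with a grace period" entries (2 for openstackclient, 30 for the aodh containers, 600 for prometheus) bound how long the runtime waits between the polite signal and the forced kill; the exitCode=137 recorded for openstackclient below is the signature of a SIGKILL after its 2s budget ran out (137 = 128 + 9). A minimal sketch of that contract for a local process, assuming Unix signals and a stand-in workload:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "60") // stand-in for the container process
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	_ = cmd.Process.Signal(syscall.SIGTERM) // polite request first

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		fmt.Println("exited within grace period:", err)
	case <-time.After(2 * time.Second): // cf. gracePeriod=2 above
		fmt.Println("grace period expired, sending SIGKILL")
		_ = cmd.Process.Kill()
		<-done // reap; shell would report exit status 137
	}
}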
path="/var/lib/kubelet/pods/ff25a1a5-b0a1-4ee2-9d33-8ef6d9cdc337/volumes" Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.108112 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.108855 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="prometheus" containerID="cri-o://8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8" gracePeriod=600 Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.109483 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="thanos-sidecar" containerID="cri-o://0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986" gracePeriod=600 Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.109564 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="config-reloader" containerID="cri-o://bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db" gracePeriod=600 Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.189941 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58","Type":"ContainerStarted","Data":"8b92b7e89503cc29c7969fa97c8d8ad4b755509de8ca7e1f824bf3308c9f426d"} Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.189998 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58","Type":"ContainerStarted","Data":"83d85696cc73d086a9602c36036aad80965751aee413b3590047bc611242862f"} Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.195468 4767 generic.go:334] "Generic (PLEG): container finished" podID="98b184ce-a502-4335-a4fb-1475658bed4b" containerID="1b9ad0d125732d9a9b87a9cd1f51ffec115d3b7716227021b9f186c061bc5d96" exitCode=137 Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.219739 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.219713436 podStartE2EDuration="2.219713436s" podCreationTimestamp="2026-01-28 19:25:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 19:25:21.216880737 +0000 UTC m=+3327.181063611" watchObservedRunningTime="2026-01-28 19:25:21.219713436 +0000 UTC m=+3327.183896310" Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.321967 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient"
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.473264 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6kd47\" (UniqueName: \"kubernetes.io/projected/98b184ce-a502-4335-a4fb-1475658bed4b-kube-api-access-6kd47\") pod \"98b184ce-a502-4335-a4fb-1475658bed4b\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") "
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.473316 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/98b184ce-a502-4335-a4fb-1475658bed4b-openstack-config-secret\") pod \"98b184ce-a502-4335-a4fb-1475658bed4b\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") "
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.473398 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/98b184ce-a502-4335-a4fb-1475658bed4b-openstack-config\") pod \"98b184ce-a502-4335-a4fb-1475658bed4b\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") "
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.473657 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98b184ce-a502-4335-a4fb-1475658bed4b-combined-ca-bundle\") pod \"98b184ce-a502-4335-a4fb-1475658bed4b\" (UID: \"98b184ce-a502-4335-a4fb-1475658bed4b\") "
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.531222 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/prometheus-metric-storage-0" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="prometheus" probeResult="failure" output="Get \"http://10.217.1.15:9090/-/ready\": dial tcp 10.217.1.15:9090: connect: connection refused"
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.547669 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98b184ce-a502-4335-a4fb-1475658bed4b-kube-api-access-6kd47" (OuterVolumeSpecName: "kube-api-access-6kd47") pod "98b184ce-a502-4335-a4fb-1475658bed4b" (UID: "98b184ce-a502-4335-a4fb-1475658bed4b"). InnerVolumeSpecName "kube-api-access-6kd47". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.549386 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/98b184ce-a502-4335-a4fb-1475658bed4b-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "98b184ce-a502-4335-a4fb-1475658bed4b" (UID: "98b184ce-a502-4335-a4fb-1475658bed4b"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.567452 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98b184ce-a502-4335-a4fb-1475658bed4b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "98b184ce-a502-4335-a4fb-1475658bed4b" (UID: "98b184ce-a502-4335-a4fb-1475658bed4b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.577181 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/98b184ce-a502-4335-a4fb-1475658bed4b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.577238 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6kd47\" (UniqueName: \"kubernetes.io/projected/98b184ce-a502-4335-a4fb-1475658bed4b-kube-api-access-6kd47\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.577258 4767 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/98b184ce-a502-4335-a4fb-1475658bed4b-openstack-config\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.581428 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/98b184ce-a502-4335-a4fb-1475658bed4b-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "98b184ce-a502-4335-a4fb-1475658bed4b" (UID: "98b184ce-a502-4335-a4fb-1475658bed4b"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:25:21 crc kubenswrapper[4767]: I0128 19:25:21.679253 4767 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/98b184ce-a502-4335-a4fb-1475658bed4b-openstack-config-secret\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.143134 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.189265 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-web-config\") pod \"fc528c55-e822-485c-a824-9a4c15689009\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") "
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.189330 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-2\") pod \"fc528c55-e822-485c-a824-9a4c15689009\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") "
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.189361 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-42r4c\" (UniqueName: \"kubernetes.io/projected/fc528c55-e822-485c-a824-9a4c15689009-kube-api-access-42r4c\") pod \"fc528c55-e822-485c-a824-9a4c15689009\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") "
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.189393 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-thanos-prometheus-http-client-file\") pod \"fc528c55-e822-485c-a824-9a4c15689009\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") "
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.189602 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-1\") pod \"fc528c55-e822-485c-a824-9a4c15689009\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") "
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.189679 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"fc528c55-e822-485c-a824-9a4c15689009\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") "
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.189707 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fc528c55-e822-485c-a824-9a4c15689009-config-out\") pod \"fc528c55-e822-485c-a824-9a4c15689009\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") "
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.189745 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-config\") pod \"fc528c55-e822-485c-a824-9a4c15689009\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") "
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.189977 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-0\") pod \"fc528c55-e822-485c-a824-9a4c15689009\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") "
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.190018 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fc528c55-e822-485c-a824-9a4c15689009-tls-assets\") pod \"fc528c55-e822-485c-a824-9a4c15689009\" (UID: \"fc528c55-e822-485c-a824-9a4c15689009\") "
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.190229 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "fc528c55-e822-485c-a824-9a4c15689009" (UID: "fc528c55-e822-485c-a824-9a4c15689009"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.190462 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "fc528c55-e822-485c-a824-9a4c15689009" (UID: "fc528c55-e822-485c-a824-9a4c15689009"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.191177 4767 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.191202 4767 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.192706 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "fc528c55-e822-485c-a824-9a4c15689009" (UID: "fc528c55-e822-485c-a824-9a4c15689009"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.197263 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-config" (OuterVolumeSpecName: "config") pod "fc528c55-e822-485c-a824-9a4c15689009" (UID: "fc528c55-e822-485c-a824-9a4c15689009"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.198820 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "fc528c55-e822-485c-a824-9a4c15689009" (UID: "fc528c55-e822-485c-a824-9a4c15689009"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.200471 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc528c55-e822-485c-a824-9a4c15689009-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "fc528c55-e822-485c-a824-9a4c15689009" (UID: "fc528c55-e822-485c-a824-9a4c15689009"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.211545 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc528c55-e822-485c-a824-9a4c15689009-config-out" (OuterVolumeSpecName: "config-out") pod "fc528c55-e822-485c-a824-9a4c15689009" (UID: "fc528c55-e822-485c-a824-9a4c15689009"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.211655 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc528c55-e822-485c-a824-9a4c15689009-kube-api-access-42r4c" (OuterVolumeSpecName: "kube-api-access-42r4c") pod "fc528c55-e822-485c-a824-9a4c15689009" (UID: "fc528c55-e822-485c-a824-9a4c15689009"). InnerVolumeSpecName "kube-api-access-42r4c". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.215610 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "fc528c55-e822-485c-a824-9a4c15689009" (UID: "fc528c55-e822-485c-a824-9a4c15689009"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.219965 4767 scope.go:117] "RemoveContainer" containerID="1b9ad0d125732d9a9b87a9cd1f51ffec115d3b7716227021b9f186c061bc5d96"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.219975 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.226161 4767 generic.go:334] "Generic (PLEG): container finished" podID="fc528c55-e822-485c-a824-9a4c15689009" containerID="0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986" exitCode=0
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.226307 4767 generic.go:334] "Generic (PLEG): container finished" podID="fc528c55-e822-485c-a824-9a4c15689009" containerID="bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db" exitCode=0
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.226380 4767 generic.go:334] "Generic (PLEG): container finished" podID="fc528c55-e822-485c-a824-9a4c15689009" containerID="8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8" exitCode=0
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.227361 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.229415 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc528c55-e822-485c-a824-9a4c15689009","Type":"ContainerDied","Data":"0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986"}
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.229534 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc528c55-e822-485c-a824-9a4c15689009","Type":"ContainerDied","Data":"bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db"}
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.229901 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc528c55-e822-485c-a824-9a4c15689009","Type":"ContainerDied","Data":"8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8"}
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.230075 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"fc528c55-e822-485c-a824-9a4c15689009","Type":"ContainerDied","Data":"490491512ead3ad3db50e6cb19cc3345ddaa7c478cddfb1c82a0e49b4983acb6"}
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.235750 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-web-config" (OuterVolumeSpecName: "web-config") pod "fc528c55-e822-485c-a824-9a4c15689009" (UID: "fc528c55-e822-485c-a824-9a4c15689009"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.260569 4767 scope.go:117] "RemoveContainer" containerID="0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.260800 4767 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="98b184ce-a502-4335-a4fb-1475658bed4b" podUID="50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.295132 4767 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" "
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.295178 4767 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/fc528c55-e822-485c-a824-9a4c15689009-config-out\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.295192 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-config\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.295229 4767 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/fc528c55-e822-485c-a824-9a4c15689009-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.295244 4767 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/fc528c55-e822-485c-a824-9a4c15689009-tls-assets\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.295257 4767 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-web-config\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.295269 4767 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/fc528c55-e822-485c-a824-9a4c15689009-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.295284 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-42r4c\" (UniqueName: \"kubernetes.io/projected/fc528c55-e822-485c-a824-9a4c15689009-kube-api-access-42r4c\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.317064 4767 scope.go:117] "RemoveContainer" containerID="bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.319292 4767 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.397452 4767 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.404352 4767 scope.go:117] "RemoveContainer" containerID="8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.432109 4767 scope.go:117] "RemoveContainer" containerID="3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.462482 4767 scope.go:117] "RemoveContainer" containerID="0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986"
Jan 28 19:25:22 crc kubenswrapper[4767]: E0128 19:25:22.464760 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986\": container with ID starting with 0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986 not found: ID does not exist" containerID="0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.464816 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986"} err="failed to get container status \"0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986\": rpc error: code = NotFound desc = could not find container \"0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986\": container with ID starting with 0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986 not found: ID does not exist"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.464851 4767 scope.go:117] "RemoveContainer" containerID="bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db"
Jan 28 19:25:22 crc kubenswrapper[4767]: E0128 19:25:22.465917 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db\": container with ID starting with bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db not found: ID does not exist" containerID="bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.465985 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db"} err="failed to get container status \"bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db\": rpc error: code = NotFound desc = could not find container \"bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db\": container with ID starting with bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db not found: ID does not exist"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.466028 4767 scope.go:117] "RemoveContainer" containerID="8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8"
Jan 28 19:25:22 crc kubenswrapper[4767]: E0128 19:25:22.466512 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8\": container with ID starting with 8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8 not found: ID does not exist" containerID="8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.466561 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8"} err="failed to get container status \"8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8\": rpc error: code = NotFound desc = could not find container \"8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8\": container with ID starting with 8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8 not found: ID does not exist"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.466587 4767 scope.go:117] "RemoveContainer" containerID="3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f"
Jan 28 19:25:22 crc kubenswrapper[4767]: E0128 19:25:22.467070 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f\": container with ID starting with 3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f not found: ID does not exist" containerID="3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.467105 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f"} err="failed to get container status \"3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f\": rpc error: code = NotFound desc = could not find container \"3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f\": container with ID starting with 3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f not found: ID does not exist"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.467128 4767 scope.go:117] "RemoveContainer" containerID="0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.467627 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986"} err="failed to get container status \"0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986\": rpc error: code = NotFound desc = could not find container \"0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986\": container with ID starting with 0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986 not found: ID does not exist"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.467682 4767 scope.go:117] "RemoveContainer" containerID="bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.467972 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db"} err="failed to get container status \"bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db\": rpc error: code = NotFound desc = could not find container \"bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db\": container with ID starting with bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db not found: ID does not exist"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.467995 4767 scope.go:117] "RemoveContainer" containerID="8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.468239 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8"} err="failed to get container status \"8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8\": rpc error: code = NotFound desc = could not find container \"8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8\": container with ID starting with 8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8 not found: ID does not exist"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.468265 4767 scope.go:117] "RemoveContainer" containerID="3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.468510 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f"} err="failed to get container status \"3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f\": rpc error: code = NotFound desc = could not find container \"3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f\": container with ID starting with 3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f not found: ID does not exist"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.468538 4767 scope.go:117] "RemoveContainer" containerID="0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.469105 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986"} err="failed to get container status \"0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986\": rpc error: code = NotFound desc = could not find container \"0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986\": container with ID starting with 0227af32b18ec43a0874af0afc72664c2afb8a05bd37e2d6a1be09f63e263986 not found: ID does not exist"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.469133 4767 scope.go:117] "RemoveContainer" containerID="bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.469405 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db"} err="failed to get container status \"bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db\": rpc error: code = NotFound desc = could not find container \"bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db\": container with ID starting with bb670172eae75db314f890572006ee7530356299ec6d9f3966ee21e2916013db not found: ID does not exist"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.469440 4767 scope.go:117] "RemoveContainer" containerID="8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.469673 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8"} err="failed to get container status \"8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8\": rpc error: code = NotFound desc = could not find container \"8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8\": container with ID starting with 8207087fa0141f3f449ae6645bcf6468d04c09cca107056f1f6e48858b7964e8 not found: ID does not exist"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.469698 4767 scope.go:117] "RemoveContainer" containerID="3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.469963 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f"} err="failed to get container status \"3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f\": rpc error: code = NotFound desc = could not find container \"3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f\": container with ID starting with 3477603469a889e2674803d917a4da4ec73b44c69293394ba16691092c7e572f not found: ID does not exist"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.573113 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.590454 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.606503 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 19:25:22 crc kubenswrapper[4767]: E0128 19:25:22.607026 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="init-config-reloader"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.607048 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="init-config-reloader"
Jan 28 19:25:22 crc kubenswrapper[4767]: E0128 19:25:22.607068 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="prometheus"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.607076 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="prometheus"
Jan 28 19:25:22 crc kubenswrapper[4767]: E0128 19:25:22.607087 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="config-reloader"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.607096 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="config-reloader"
Jan 28 19:25:22 crc kubenswrapper[4767]: E0128 19:25:22.607117 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="thanos-sidecar"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.607125 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="thanos-sidecar"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.607388 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="thanos-sidecar"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.607409 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="prometheus"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.607426 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc528c55-e822-485c-a824-9a4c15689009" containerName="config-reloader"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.609451 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.611580 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.614086 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-b9jgx"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.614324 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.614464 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.614687 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.614862 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.615305 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.615478 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.627550 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.631235 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704370 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftd5k\" (UniqueName: \"kubernetes.io/projected/77d4467f-0d9f-4550-afc8-9608ce579795-kube-api-access-ftd5k\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704446 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704491 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704525 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704553 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704611 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704682 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-config\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704719 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/77d4467f-0d9f-4550-afc8-9608ce579795-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704770 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704805 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/77d4467f-0d9f-4550-afc8-9608ce579795-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704830 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704848 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.704888 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808041 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808120 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808156 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808184 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808235 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808297 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-config\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808324 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/77d4467f-0d9f-4550-afc8-9608ce579795-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808369 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808400 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/77d4467f-0d9f-4550-afc8-9608ce579795-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808419 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808440 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808476 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808487 4767 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.808536 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftd5k\" (UniqueName: \"kubernetes.io/projected/77d4467f-0d9f-4550-afc8-9608ce579795-kube-api-access-ftd5k\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.809252 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.810004 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.821675 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.825136 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/77d4467f-0d9f-4550-afc8-9608ce579795-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.825773 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.826550 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.827199 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.829876 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.836225 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/77d4467f-0d9f-4550-afc8-9608ce579795-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.836931 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-config\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.839557 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98b184ce-a502-4335-a4fb-1475658bed4b" path="/var/lib/kubelet/pods/98b184ce-a502-4335-a4fb-1475658bed4b/volumes"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.842234 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc528c55-e822-485c-a824-9a4c15689009" path="/var/lib/kubelet/pods/fc528c55-e822-485c-a824-9a4c15689009/volumes"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.849577 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftd5k\" (UniqueName: \"kubernetes.io/projected/77d4467f-0d9f-4550-afc8-9608ce579795-kube-api-access-ftd5k\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.850103 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.892708 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"prometheus-metric-storage-0\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:22 crc kubenswrapper[4767]: I0128 19:25:22.932004 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 19:25:23 crc kubenswrapper[4767]: I0128 19:25:23.551727 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lm9nc"
Jan 28 19:25:23 crc kubenswrapper[4767]: I0128 19:25:23.611428 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-lm9nc"
Jan 28 19:25:23 crc kubenswrapper[4767]: I0128 19:25:23.675701 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 19:25:23 crc kubenswrapper[4767]: W0128 19:25:23.677479 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod77d4467f_0d9f_4550_afc8_9608ce579795.slice/crio-976698b192ab2540b7b60560af03c5d25628c297aed1386693b6cbf08e9fdf28 WatchSource:0}: Error finding container 976698b192ab2540b7b60560af03c5d25628c297aed1386693b6cbf08e9fdf28: Status 404 returned error can't find the container with id 976698b192ab2540b7b60560af03c5d25628c297aed1386693b6cbf08e9fdf28
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.225511 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.295687 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"77d4467f-0d9f-4550-afc8-9608ce579795","Type":"ContainerStarted","Data":"976698b192ab2540b7b60560af03c5d25628c297aed1386693b6cbf08e9fdf28"}
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.308040 4767 generic.go:334] "Generic (PLEG): container finished" podID="0b821092-04ee-43e7-beba-8cd160857435" containerID="44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40" exitCode=0
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.308096 4767 generic.go:334] "Generic (PLEG): container finished" podID="0b821092-04ee-43e7-beba-8cd160857435" containerID="535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8" exitCode=0
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.309370 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.309501 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0b821092-04ee-43e7-beba-8cd160857435","Type":"ContainerDied","Data":"44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40"}
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.309544 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0b821092-04ee-43e7-beba-8cd160857435","Type":"ContainerDied","Data":"535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8"}
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.309556 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"0b821092-04ee-43e7-beba-8cd160857435","Type":"ContainerDied","Data":"debf48c9771816c8ae5c04d316f26d45601641ec28119744e7d46607b7d49e80"}
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.309576 4767 scope.go:117] "RemoveContainer" containerID="44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.345241 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lm9nc"]
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.352104 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-internal-tls-certs\") pod \"0b821092-04ee-43e7-beba-8cd160857435\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") "
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.352270 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-combined-ca-bundle\") pod \"0b821092-04ee-43e7-beba-8cd160857435\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") "
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.352399 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-scripts\") pod \"0b821092-04ee-43e7-beba-8cd160857435\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") "
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.352591 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sd699\" (UniqueName: \"kubernetes.io/projected/0b821092-04ee-43e7-beba-8cd160857435-kube-api-access-sd699\") pod \"0b821092-04ee-43e7-beba-8cd160857435\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") "
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.352677 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-config-data\") pod \"0b821092-04ee-43e7-beba-8cd160857435\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") "
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.359626 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-scripts" (OuterVolumeSpecName: "scripts") pod "0b821092-04ee-43e7-beba-8cd160857435" (UID: "0b821092-04ee-43e7-beba-8cd160857435"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.359835 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b821092-04ee-43e7-beba-8cd160857435-kube-api-access-sd699" (OuterVolumeSpecName: "kube-api-access-sd699") pod "0b821092-04ee-43e7-beba-8cd160857435" (UID: "0b821092-04ee-43e7-beba-8cd160857435"). InnerVolumeSpecName "kube-api-access-sd699". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.362224 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-public-tls-certs\") pod \"0b821092-04ee-43e7-beba-8cd160857435\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") "
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.369113 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sd699\" (UniqueName: \"kubernetes.io/projected/0b821092-04ee-43e7-beba-8cd160857435-kube-api-access-sd699\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.369186 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.446490 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "0b821092-04ee-43e7-beba-8cd160857435" (UID: "0b821092-04ee-43e7-beba-8cd160857435"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.469357 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0b821092-04ee-43e7-beba-8cd160857435" (UID: "0b821092-04ee-43e7-beba-8cd160857435"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.470492 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-internal-tls-certs\") pod \"0b821092-04ee-43e7-beba-8cd160857435\" (UID: \"0b821092-04ee-43e7-beba-8cd160857435\") "
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.471013 4767 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:24 crc kubenswrapper[4767]: W0128 19:25:24.471087 4767 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/0b821092-04ee-43e7-beba-8cd160857435/volumes/kubernetes.io~secret/internal-tls-certs
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.471096 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "0b821092-04ee-43e7-beba-8cd160857435" (UID: "0b821092-04ee-43e7-beba-8cd160857435"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.528417 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-config-data" (OuterVolumeSpecName: "config-data") pod "0b821092-04ee-43e7-beba-8cd160857435" (UID: "0b821092-04ee-43e7-beba-8cd160857435"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.552935 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0b821092-04ee-43e7-beba-8cd160857435" (UID: "0b821092-04ee-43e7-beba-8cd160857435"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.573049 4767 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.573107 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.573120 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0b821092-04ee-43e7-beba-8cd160857435-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.663827 4767 scope.go:117] "RemoveContainer" containerID="535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.681621 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"]
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.694780 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"]
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.711391 4767 scope.go:117] "RemoveContainer" containerID="1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.712012 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"]
Jan 28 19:25:24 crc kubenswrapper[4767]: E0128 19:25:24.712763 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-evaluator"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.712790 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-evaluator"
Jan 28 19:25:24 crc kubenswrapper[4767]: E0128 19:25:24.712810 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-notifier"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.712817 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-notifier"
Jan 28 19:25:24 crc kubenswrapper[4767]: E0128 19:25:24.712847 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-listener"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.712853 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-listener"
Jan 28 19:25:24 crc kubenswrapper[4767]: E0128 19:25:24.712883 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-api"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.712892 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-api"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.713117 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-api"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.713143 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-evaluator"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.713163 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-notifier"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.713177 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b821092-04ee-43e7-beba-8cd160857435" containerName="aodh-listener"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.715460 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.719744 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.721469 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.722262 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.722489 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-chl48"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.722519 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.729580 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.753370 4767 scope.go:117] "RemoveContainer" containerID="529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.779195 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-config-data\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.779261 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-scripts\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.779292 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-internal-tls-certs\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.779314 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-combined-ca-bundle\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.779334 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qsgx\" (UniqueName: \"kubernetes.io/projected/3cab4d05-5139-401e-b6f1-9818d4b7fe85-kube-api-access-6qsgx\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.779370 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-public-tls-certs\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.786554 4767 scope.go:117] "RemoveContainer" containerID="44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40"
Jan 28 19:25:24 crc kubenswrapper[4767]: E0128 19:25:24.786995 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40\": container with ID starting with 44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40 not found: ID does not exist" containerID="44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.787027 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40"} err="failed to get container status \"44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40\": rpc error: code = NotFound desc = could not find container \"44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40\": container with ID starting with 44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40 not found: ID does not exist"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.787053 4767 scope.go:117] "RemoveContainer" containerID="535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8"
Jan 28 19:25:24 crc kubenswrapper[4767]: E0128 19:25:24.787522 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8\": container with ID starting with 535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8 not found: ID does not exist" containerID="535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.787542 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8"} err="failed to get container status \"535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8\": rpc error: code = NotFound desc = could not find container \"535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8\": container with ID starting with 535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8 not found: ID does not exist"
Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.787559 4767 scope.go:117] "RemoveContainer" containerID="1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3"
Jan 28 19:25:24 crc kubenswrapper[4767]: E0128 19:25:24.787832 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3\": container with ID starting with 1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3 not found: ID does not exist" containerID="1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3" Jan
28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.787849 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3"} err="failed to get container status \"1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3\": rpc error: code = NotFound desc = could not find container \"1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3\": container with ID starting with 1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3 not found: ID does not exist" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.787863 4767 scope.go:117] "RemoveContainer" containerID="529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda" Jan 28 19:25:24 crc kubenswrapper[4767]: E0128 19:25:24.788079 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda\": container with ID starting with 529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda not found: ID does not exist" containerID="529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.788101 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda"} err="failed to get container status \"529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda\": rpc error: code = NotFound desc = could not find container \"529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda\": container with ID starting with 529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda not found: ID does not exist" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.788114 4767 scope.go:117] "RemoveContainer" containerID="44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.788388 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40"} err="failed to get container status \"44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40\": rpc error: code = NotFound desc = could not find container \"44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40\": container with ID starting with 44a1a45fc4bf5dd1e0efb7f262233618325a5ea3b2fb9bd39b0155567ea05d40 not found: ID does not exist" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.788410 4767 scope.go:117] "RemoveContainer" containerID="535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.788695 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8"} err="failed to get container status \"535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8\": rpc error: code = NotFound desc = could not find container \"535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8\": container with ID starting with 535dbc22a931bef9ad3267095e9bc868e3ea3c9f4433134efecbb776744b6fb8 not found: ID does not exist" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.788711 4767 scope.go:117] "RemoveContainer" containerID="1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3" Jan 
28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.788896 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3"} err="failed to get container status \"1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3\": rpc error: code = NotFound desc = could not find container \"1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3\": container with ID starting with 1e24c17adc8a7236ec10d8b2377b33810692aa6c5e1e431d86e519c70fb31cd3 not found: ID does not exist" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.788911 4767 scope.go:117] "RemoveContainer" containerID="529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.789084 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda"} err="failed to get container status \"529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda\": rpc error: code = NotFound desc = could not find container \"529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda\": container with ID starting with 529f103781bd009c9ab20cb123aa08688ada46e90dc9e02fa2b1a2a6ee5e0eda not found: ID does not exist" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.807528 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b821092-04ee-43e7-beba-8cd160857435" path="/var/lib/kubelet/pods/0b821092-04ee-43e7-beba-8cd160857435/volumes" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.882469 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-config-data\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.882519 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-scripts\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.882551 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-internal-tls-certs\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.882583 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-combined-ca-bundle\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.882604 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qsgx\" (UniqueName: \"kubernetes.io/projected/3cab4d05-5139-401e-b6f1-9818d4b7fe85-kube-api-access-6qsgx\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.882673 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-public-tls-certs\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.888633 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-scripts\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.888871 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-combined-ca-bundle\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.889613 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-config-data\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.891038 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-public-tls-certs\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.899157 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-internal-tls-certs\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0" Jan 28 19:25:24 crc kubenswrapper[4767]: I0128 19:25:24.909585 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qsgx\" (UniqueName: \"kubernetes.io/projected/3cab4d05-5139-401e-b6f1-9818d4b7fe85-kube-api-access-6qsgx\") pod \"aodh-0\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") " pod="openstack/aodh-0" Jan 28 19:25:25 crc kubenswrapper[4767]: I0128 19:25:25.039409 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0" Jan 28 19:25:25 crc kubenswrapper[4767]: I0128 19:25:25.333528 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-lm9nc" podUID="56a7720b-a6a0-432a-bd24-eaab9a30780a" containerName="registry-server" containerID="cri-o://4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142" gracePeriod=2 Jan 28 19:25:25 crc kubenswrapper[4767]: I0128 19:25:25.550714 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"] Jan 28 19:25:25 crc kubenswrapper[4767]: I0128 19:25:25.868398 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.017243 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56a7720b-a6a0-432a-bd24-eaab9a30780a-catalog-content\") pod \"56a7720b-a6a0-432a-bd24-eaab9a30780a\" (UID: \"56a7720b-a6a0-432a-bd24-eaab9a30780a\") " Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.017365 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56a7720b-a6a0-432a-bd24-eaab9a30780a-utilities\") pod \"56a7720b-a6a0-432a-bd24-eaab9a30780a\" (UID: \"56a7720b-a6a0-432a-bd24-eaab9a30780a\") " Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.017435 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7pqs8\" (UniqueName: \"kubernetes.io/projected/56a7720b-a6a0-432a-bd24-eaab9a30780a-kube-api-access-7pqs8\") pod \"56a7720b-a6a0-432a-bd24-eaab9a30780a\" (UID: \"56a7720b-a6a0-432a-bd24-eaab9a30780a\") " Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.018487 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56a7720b-a6a0-432a-bd24-eaab9a30780a-utilities" (OuterVolumeSpecName: "utilities") pod "56a7720b-a6a0-432a-bd24-eaab9a30780a" (UID: "56a7720b-a6a0-432a-bd24-eaab9a30780a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.046659 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/56a7720b-a6a0-432a-bd24-eaab9a30780a-kube-api-access-7pqs8" (OuterVolumeSpecName: "kube-api-access-7pqs8") pod "56a7720b-a6a0-432a-bd24-eaab9a30780a" (UID: "56a7720b-a6a0-432a-bd24-eaab9a30780a"). InnerVolumeSpecName "kube-api-access-7pqs8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.085368 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/56a7720b-a6a0-432a-bd24-eaab9a30780a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "56a7720b-a6a0-432a-bd24-eaab9a30780a" (UID: "56a7720b-a6a0-432a-bd24-eaab9a30780a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.121153 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/56a7720b-a6a0-432a-bd24-eaab9a30780a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.121199 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/56a7720b-a6a0-432a-bd24-eaab9a30780a-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.121268 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7pqs8\" (UniqueName: \"kubernetes.io/projected/56a7720b-a6a0-432a-bd24-eaab9a30780a-kube-api-access-7pqs8\") on node \"crc\" DevicePath \"\"" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.343902 4767 generic.go:334] "Generic (PLEG): container finished" podID="56a7720b-a6a0-432a-bd24-eaab9a30780a" containerID="4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142" exitCode=0 Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.344093 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lm9nc" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.344099 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm9nc" event={"ID":"56a7720b-a6a0-432a-bd24-eaab9a30780a","Type":"ContainerDied","Data":"4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142"} Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.344183 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lm9nc" event={"ID":"56a7720b-a6a0-432a-bd24-eaab9a30780a","Type":"ContainerDied","Data":"895c36e3a5d4ba16933bb18be77e08f86f3c1152e4e1a22c542b2f820053c45a"} Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.344234 4767 scope.go:117] "RemoveContainer" containerID="4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.347769 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3cab4d05-5139-401e-b6f1-9818d4b7fe85","Type":"ContainerStarted","Data":"2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89"} Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.347807 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3cab4d05-5139-401e-b6f1-9818d4b7fe85","Type":"ContainerStarted","Data":"7c5382633339b38387197be2da69c52ee54bfeb574c04937603fc3803aef0118"} Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.513843 4767 scope.go:117] "RemoveContainer" containerID="e60394328bc585d69d770b0480c3955fb6b1d3e020b01b6cb3a2da15a2ea1292" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.530128 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-lm9nc"] Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.542895 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-lm9nc"] Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.554415 4767 scope.go:117] "RemoveContainer" containerID="1e71d965707c38e02c5628ee53fd488f7a9888be178276a04130a100777b1827" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.810635 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod 
volumes dir" podUID="56a7720b-a6a0-432a-bd24-eaab9a30780a" path="/var/lib/kubelet/pods/56a7720b-a6a0-432a-bd24-eaab9a30780a/volumes" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.841348 4767 scope.go:117] "RemoveContainer" containerID="4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142" Jan 28 19:25:26 crc kubenswrapper[4767]: E0128 19:25:26.842116 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142\": container with ID starting with 4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142 not found: ID does not exist" containerID="4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.842161 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142"} err="failed to get container status \"4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142\": rpc error: code = NotFound desc = could not find container \"4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142\": container with ID starting with 4d82183f7692fc94c387cf135133d4219cc756f774ecdcf0a195ff256f617142 not found: ID does not exist" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.842186 4767 scope.go:117] "RemoveContainer" containerID="e60394328bc585d69d770b0480c3955fb6b1d3e020b01b6cb3a2da15a2ea1292" Jan 28 19:25:26 crc kubenswrapper[4767]: E0128 19:25:26.842780 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e60394328bc585d69d770b0480c3955fb6b1d3e020b01b6cb3a2da15a2ea1292\": container with ID starting with e60394328bc585d69d770b0480c3955fb6b1d3e020b01b6cb3a2da15a2ea1292 not found: ID does not exist" containerID="e60394328bc585d69d770b0480c3955fb6b1d3e020b01b6cb3a2da15a2ea1292" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.842806 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e60394328bc585d69d770b0480c3955fb6b1d3e020b01b6cb3a2da15a2ea1292"} err="failed to get container status \"e60394328bc585d69d770b0480c3955fb6b1d3e020b01b6cb3a2da15a2ea1292\": rpc error: code = NotFound desc = could not find container \"e60394328bc585d69d770b0480c3955fb6b1d3e020b01b6cb3a2da15a2ea1292\": container with ID starting with e60394328bc585d69d770b0480c3955fb6b1d3e020b01b6cb3a2da15a2ea1292 not found: ID does not exist" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.842821 4767 scope.go:117] "RemoveContainer" containerID="1e71d965707c38e02c5628ee53fd488f7a9888be178276a04130a100777b1827" Jan 28 19:25:26 crc kubenswrapper[4767]: E0128 19:25:26.843197 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1e71d965707c38e02c5628ee53fd488f7a9888be178276a04130a100777b1827\": container with ID starting with 1e71d965707c38e02c5628ee53fd488f7a9888be178276a04130a100777b1827 not found: ID does not exist" containerID="1e71d965707c38e02c5628ee53fd488f7a9888be178276a04130a100777b1827" Jan 28 19:25:26 crc kubenswrapper[4767]: I0128 19:25:26.843244 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1e71d965707c38e02c5628ee53fd488f7a9888be178276a04130a100777b1827"} err="failed to get container status 
\"1e71d965707c38e02c5628ee53fd488f7a9888be178276a04130a100777b1827\": rpc error: code = NotFound desc = could not find container \"1e71d965707c38e02c5628ee53fd488f7a9888be178276a04130a100777b1827\": container with ID starting with 1e71d965707c38e02c5628ee53fd488f7a9888be178276a04130a100777b1827 not found: ID does not exist" Jan 28 19:25:28 crc kubenswrapper[4767]: I0128 19:25:28.381161 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3cab4d05-5139-401e-b6f1-9818d4b7fe85","Type":"ContainerStarted","Data":"bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52"} Jan 28 19:25:28 crc kubenswrapper[4767]: I0128 19:25:28.381551 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3cab4d05-5139-401e-b6f1-9818d4b7fe85","Type":"ContainerStarted","Data":"e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f"} Jan 28 19:25:29 crc kubenswrapper[4767]: I0128 19:25:29.395178 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"77d4467f-0d9f-4550-afc8-9608ce579795","Type":"ContainerStarted","Data":"eadc77631dd4eba97463459e8bc29f2bf7da47ef569f6b9f122ca7f7e6922455"} Jan 28 19:25:29 crc kubenswrapper[4767]: I0128 19:25:29.398938 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3cab4d05-5139-401e-b6f1-9818d4b7fe85","Type":"ContainerStarted","Data":"af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e"} Jan 28 19:25:29 crc kubenswrapper[4767]: I0128 19:25:29.465682 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.373889337 podStartE2EDuration="5.465657842s" podCreationTimestamp="2026-01-28 19:25:24 +0000 UTC" firstStartedPulling="2026-01-28 19:25:25.595389711 +0000 UTC m=+3331.559572595" lastFinishedPulling="2026-01-28 19:25:28.687158226 +0000 UTC m=+3334.651341100" observedRunningTime="2026-01-28 19:25:29.443830455 +0000 UTC m=+3335.408013349" watchObservedRunningTime="2026-01-28 19:25:29.465657842 +0000 UTC m=+3335.429840726" Jan 28 19:25:36 crc kubenswrapper[4767]: I0128 19:25:36.472833 4767 generic.go:334] "Generic (PLEG): container finished" podID="77d4467f-0d9f-4550-afc8-9608ce579795" containerID="eadc77631dd4eba97463459e8bc29f2bf7da47ef569f6b9f122ca7f7e6922455" exitCode=0 Jan 28 19:25:36 crc kubenswrapper[4767]: I0128 19:25:36.472924 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"77d4467f-0d9f-4550-afc8-9608ce579795","Type":"ContainerDied","Data":"eadc77631dd4eba97463459e8bc29f2bf7da47ef569f6b9f122ca7f7e6922455"} Jan 28 19:25:37 crc kubenswrapper[4767]: I0128 19:25:37.489745 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"77d4467f-0d9f-4550-afc8-9608ce579795","Type":"ContainerStarted","Data":"1aa46d20965ed3bf0482e402ce5ecb2833ed9c4772194406146af38fb6e27d70"} Jan 28 19:25:41 crc kubenswrapper[4767]: I0128 19:25:41.536408 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"77d4467f-0d9f-4550-afc8-9608ce579795","Type":"ContainerStarted","Data":"1554d3358defeb3c99a09f166877692132aea930234e78c52fc1cf20b66cc186"} Jan 28 19:25:41 crc kubenswrapper[4767]: I0128 19:25:41.538616 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" 
event={"ID":"77d4467f-0d9f-4550-afc8-9608ce579795","Type":"ContainerStarted","Data":"0c56481135024ac77a44243b3c2a75396dee58d506f9f0827f55e8768a619418"} Jan 28 19:25:41 crc kubenswrapper[4767]: I0128 19:25:41.570187 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=19.57016578 podStartE2EDuration="19.57016578s" podCreationTimestamp="2026-01-28 19:25:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 19:25:41.569143928 +0000 UTC m=+3347.533326812" watchObservedRunningTime="2026-01-28 19:25:41.57016578 +0000 UTC m=+3347.534348654" Jan 28 19:25:42 crc kubenswrapper[4767]: I0128 19:25:42.932713 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 28 19:25:52 crc kubenswrapper[4767]: I0128 19:25:52.933569 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 28 19:25:52 crc kubenswrapper[4767]: I0128 19:25:52.940693 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 28 19:25:53 crc kubenswrapper[4767]: I0128 19:25:53.679587 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:15 crc kubenswrapper[4767]: I0128 19:27:15.455301 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:27:15 crc kubenswrapper[4767]: I0128 19:27:15.455946 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:27:19 crc kubenswrapper[4767]: I0128 19:27:19.496547 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-79f6f75b9c-dhf5c_0d7c50d3-1348-43e5-a8fa-f05cd53d2a42/manager/0.log" Jan 28 19:27:21 crc kubenswrapper[4767]: I0128 19:27:21.818644 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 19:27:21 crc kubenswrapper[4767]: I0128 19:27:21.819321 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="prometheus" containerID="cri-o://1aa46d20965ed3bf0482e402ce5ecb2833ed9c4772194406146af38fb6e27d70" gracePeriod=600 Jan 28 19:27:21 crc kubenswrapper[4767]: I0128 19:27:21.819931 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="thanos-sidecar" containerID="cri-o://1554d3358defeb3c99a09f166877692132aea930234e78c52fc1cf20b66cc186" gracePeriod=600 Jan 28 19:27:21 crc kubenswrapper[4767]: I0128 19:27:21.819991 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" 
containerName="config-reloader" containerID="cri-o://0c56481135024ac77a44243b3c2a75396dee58d506f9f0827f55e8768a619418" gracePeriod=600 Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.687164 4767 generic.go:334] "Generic (PLEG): container finished" podID="77d4467f-0d9f-4550-afc8-9608ce579795" containerID="1554d3358defeb3c99a09f166877692132aea930234e78c52fc1cf20b66cc186" exitCode=0 Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.687996 4767 generic.go:334] "Generic (PLEG): container finished" podID="77d4467f-0d9f-4550-afc8-9608ce579795" containerID="0c56481135024ac77a44243b3c2a75396dee58d506f9f0827f55e8768a619418" exitCode=0 Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.688011 4767 generic.go:334] "Generic (PLEG): container finished" podID="77d4467f-0d9f-4550-afc8-9608ce579795" containerID="1aa46d20965ed3bf0482e402ce5ecb2833ed9c4772194406146af38fb6e27d70" exitCode=0 Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.687255 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"77d4467f-0d9f-4550-afc8-9608ce579795","Type":"ContainerDied","Data":"1554d3358defeb3c99a09f166877692132aea930234e78c52fc1cf20b66cc186"} Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.688061 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"77d4467f-0d9f-4550-afc8-9608ce579795","Type":"ContainerDied","Data":"0c56481135024ac77a44243b3c2a75396dee58d506f9f0827f55e8768a619418"} Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.688082 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"77d4467f-0d9f-4550-afc8-9608ce579795","Type":"ContainerDied","Data":"1aa46d20965ed3bf0482e402ce5ecb2833ed9c4772194406146af38fb6e27d70"} Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.819197 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.961765 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-secret-combined-ca-bundle\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.961917 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-1\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.961951 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-thanos-prometheus-http-client-file\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.962012 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/77d4467f-0d9f-4550-afc8-9608ce579795-config-out\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.962054 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.962080 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.962146 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftd5k\" (UniqueName: \"kubernetes.io/projected/77d4467f-0d9f-4550-afc8-9608ce579795-kube-api-access-ftd5k\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.962229 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.962291 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-config\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.962355 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/77d4467f-0d9f-4550-afc8-9608ce579795-tls-assets\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.962440 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-2\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.962491 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-0\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.962539 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"77d4467f-0d9f-4550-afc8-9608ce579795\" (UID: \"77d4467f-0d9f-4550-afc8-9608ce579795\") " Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.964275 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.964532 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.964967 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.973505 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.973539 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77d4467f-0d9f-4550-afc8-9608ce579795-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.973540 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.974274 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-secret-combined-ca-bundle" (OuterVolumeSpecName: "secret-combined-ca-bundle") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "secret-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.974296 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.975088 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-config" (OuterVolumeSpecName: "config") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.976664 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77d4467f-0d9f-4550-afc8-9608ce579795-config-out" (OuterVolumeSpecName: "config-out") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.976684 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77d4467f-0d9f-4550-afc8-9608ce579795-kube-api-access-ftd5k" (OuterVolumeSpecName: "kube-api-access-ftd5k") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "kube-api-access-ftd5k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:27:22 crc kubenswrapper[4767]: I0128 19:27:22.977134 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.065229 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-config\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.065567 4767 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/77d4467f-0d9f-4550-afc8-9608ce579795-tls-assets\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.065651 4767 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.065864 4767 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.065957 4767 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.066049 4767 reconciler_common.go:293] "Volume detached for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-secret-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.066133 4767 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/77d4467f-0d9f-4550-afc8-9608ce579795-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.066234 4767 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.066331 4767 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/77d4467f-0d9f-4550-afc8-9608ce579795-config-out\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.066413 4767 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc 
kubenswrapper[4767]: I0128 19:27:23.066503 4767 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.066592 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftd5k\" (UniqueName: \"kubernetes.io/projected/77d4467f-0d9f-4550-afc8-9608ce579795-kube-api-access-ftd5k\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.083430 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config" (OuterVolumeSpecName: "web-config") pod "77d4467f-0d9f-4550-afc8-9608ce579795" (UID: "77d4467f-0d9f-4550-afc8-9608ce579795"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.093440 4767 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.168964 4767 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.169004 4767 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/77d4467f-0d9f-4550-afc8-9608ce579795-web-config\") on node \"crc\" DevicePath \"\"" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.710125 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"77d4467f-0d9f-4550-afc8-9608ce579795","Type":"ContainerDied","Data":"976698b192ab2540b7b60560af03c5d25628c297aed1386693b6cbf08e9fdf28"} Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.710620 4767 scope.go:117] "RemoveContainer" containerID="1554d3358defeb3c99a09f166877692132aea930234e78c52fc1cf20b66cc186" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.710240 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.748939 4767 scope.go:117] "RemoveContainer" containerID="0c56481135024ac77a44243b3c2a75396dee58d506f9f0827f55e8768a619418" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.750041 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.761889 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.780741 4767 scope.go:117] "RemoveContainer" containerID="1aa46d20965ed3bf0482e402ce5ecb2833ed9c4772194406146af38fb6e27d70" Jan 28 19:27:23 crc kubenswrapper[4767]: I0128 19:27:23.816590 4767 scope.go:117] "RemoveContainer" containerID="eadc77631dd4eba97463459e8bc29f2bf7da47ef569f6b9f122ca7f7e6922455" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.647910 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 19:27:24 crc kubenswrapper[4767]: E0128 19:27:24.648530 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="init-config-reloader" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.648548 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="init-config-reloader" Jan 28 19:27:24 crc kubenswrapper[4767]: E0128 19:27:24.648712 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56a7720b-a6a0-432a-bd24-eaab9a30780a" containerName="extract-content" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.648720 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="56a7720b-a6a0-432a-bd24-eaab9a30780a" containerName="extract-content" Jan 28 19:27:24 crc kubenswrapper[4767]: E0128 19:27:24.648746 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="config-reloader" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.648755 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="config-reloader" Jan 28 19:27:24 crc kubenswrapper[4767]: E0128 19:27:24.648769 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56a7720b-a6a0-432a-bd24-eaab9a30780a" containerName="extract-utilities" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.648775 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="56a7720b-a6a0-432a-bd24-eaab9a30780a" containerName="extract-utilities" Jan 28 19:27:24 crc kubenswrapper[4767]: E0128 19:27:24.648788 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="prometheus" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.648794 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="prometheus" Jan 28 19:27:24 crc kubenswrapper[4767]: E0128 19:27:24.648803 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="56a7720b-a6a0-432a-bd24-eaab9a30780a" containerName="registry-server" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.648811 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="56a7720b-a6a0-432a-bd24-eaab9a30780a" containerName="registry-server" Jan 28 19:27:24 crc kubenswrapper[4767]: E0128 19:27:24.648826 4767 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="thanos-sidecar" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.648832 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="thanos-sidecar" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.649049 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="56a7720b-a6a0-432a-bd24-eaab9a30780a" containerName="registry-server" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.649064 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="thanos-sidecar" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.649082 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="prometheus" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.649093 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" containerName="config-reloader" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.651117 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.656974 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.657743 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.657963 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.657974 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.657996 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.658127 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.658235 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-b9jgx" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.661602 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.672153 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.678173 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.804498 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 
crc kubenswrapper[4767]: I0128 19:27:24.805053 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.805192 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.805250 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.805346 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.805384 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.805414 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.805442 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-config\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.805544 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: 
I0128 19:27:24.805566 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fc7p\" (UniqueName: \"kubernetes.io/projected/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-kube-api-access-7fc7p\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.805599 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.805834 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-db\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.805942 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.809672 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77d4467f-0d9f-4550-afc8-9608ce579795" path="/var/lib/kubelet/pods/77d4467f-0d9f-4550-afc8-9608ce579795/volumes" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.908701 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.908801 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.908823 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.908848 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-config\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " 
pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.908919 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fc7p\" (UniqueName: \"kubernetes.io/projected/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-kube-api-access-7fc7p\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.908939 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.908962 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.908997 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-db\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.909039 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.909089 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.909117 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.909190 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.909228 4767 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.912887 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-db\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.918289 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.918776 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.919303 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.935340 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.935362 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.937122 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.941962 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.944664 4767 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.944760 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-config\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.945136 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.950189 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.968285 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fc7p\" (UniqueName: \"kubernetes.io/projected/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-kube-api-access-7fc7p\") pod \"prometheus-metric-storage-0\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:24 crc kubenswrapper[4767]: I0128 19:27:24.985909 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:25 crc kubenswrapper[4767]: I0128 19:27:25.579304 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 19:27:25 crc kubenswrapper[4767]: I0128 19:27:25.739528 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc","Type":"ContainerStarted","Data":"36abd21613c419d8aed86163b640590a8c751805926b0cb57625bede58d26371"} Jan 28 19:27:30 crc kubenswrapper[4767]: I0128 19:27:30.809388 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc","Type":"ContainerStarted","Data":"ef97b64d562c158a76ad906affea07f1d0c42ae44d28a5c6435cc0cbcbceca0c"} Jan 28 19:27:37 crc kubenswrapper[4767]: I0128 19:27:37.865934 4767 generic.go:334] "Generic (PLEG): container finished" podID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerID="ef97b64d562c158a76ad906affea07f1d0c42ae44d28a5c6435cc0cbcbceca0c" exitCode=0 Jan 28 19:27:37 crc kubenswrapper[4767]: I0128 19:27:37.866031 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc","Type":"ContainerDied","Data":"ef97b64d562c158a76ad906affea07f1d0c42ae44d28a5c6435cc0cbcbceca0c"} Jan 28 19:27:38 crc kubenswrapper[4767]: I0128 19:27:38.879709 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc","Type":"ContainerStarted","Data":"faf0e488f695c49ee141e4010c238f87b5fd43ba5bc5cf3d50b2a607dd5fcff6"} Jan 28 19:27:42 crc kubenswrapper[4767]: I0128 19:27:42.933905 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc","Type":"ContainerStarted","Data":"a1d9dd05128e0fec96a7715c4456d8f3a2b5ef45d6a567eda3ac2219f4449aba"} Jan 28 19:27:42 crc kubenswrapper[4767]: I0128 19:27:42.934701 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc","Type":"ContainerStarted","Data":"95cb471d70c65354b2665e57088ed57f649c6d43c6d08ea406f3f2889c87fcde"} Jan 28 19:27:42 crc kubenswrapper[4767]: I0128 19:27:42.968264 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" podStartSLOduration=18.968203352 podStartE2EDuration="18.968203352s" podCreationTimestamp="2026-01-28 19:27:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 19:27:42.960346665 +0000 UTC m=+3468.924529539" watchObservedRunningTime="2026-01-28 19:27:42.968203352 +0000 UTC m=+3468.932386216" Jan 28 19:27:44 crc kubenswrapper[4767]: I0128 19:27:44.989319 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:45 crc kubenswrapper[4767]: I0128 19:27:45.455432 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:27:45 crc kubenswrapper[4767]: I0128 19:27:45.455767 4767 prober.go:107] 
"Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:27:54 crc kubenswrapper[4767]: I0128 19:27:54.990174 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:54 crc kubenswrapper[4767]: I0128 19:27:54.997239 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 28 19:27:55 crc kubenswrapper[4767]: I0128 19:27:55.066387 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 28 19:28:15 crc kubenswrapper[4767]: I0128 19:28:15.455441 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:28:15 crc kubenswrapper[4767]: I0128 19:28:15.456082 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:28:15 crc kubenswrapper[4767]: I0128 19:28:15.456149 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 19:28:15 crc kubenswrapper[4767]: I0128 19:28:15.457430 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"34ec71217a6b006a3a33c4eb869cd5a266f9db72104e528bff51b018d9e58d57"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 19:28:15 crc kubenswrapper[4767]: I0128 19:28:15.457534 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://34ec71217a6b006a3a33c4eb869cd5a266f9db72104e528bff51b018d9e58d57" gracePeriod=600 Jan 28 19:28:16 crc kubenswrapper[4767]: I0128 19:28:16.326089 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="34ec71217a6b006a3a33c4eb869cd5a266f9db72104e528bff51b018d9e58d57" exitCode=0 Jan 28 19:28:16 crc kubenswrapper[4767]: I0128 19:28:16.326174 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"34ec71217a6b006a3a33c4eb869cd5a266f9db72104e528bff51b018d9e58d57"} Jan 28 19:28:16 crc kubenswrapper[4767]: I0128 19:28:16.327079 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" 
event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"} Jan 28 19:28:16 crc kubenswrapper[4767]: I0128 19:28:16.327119 4767 scope.go:117] "RemoveContainer" containerID="6a8fe937ba4ac48b05d7fb92dabc75d199513336bf302d7771978a15b079158e" Jan 28 19:28:51 crc kubenswrapper[4767]: I0128 19:28:51.331322 4767 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-66c6598f9f-9w6r9" podUID="85ceb5d8-a7fe-4e66-a20f-6a309942c1fc" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 502" Jan 28 19:29:00 crc kubenswrapper[4767]: I0128 19:29:00.760971 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-54g6p"] Jan 28 19:29:00 crc kubenswrapper[4767]: I0128 19:29:00.764821 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:00 crc kubenswrapper[4767]: I0128 19:29:00.779084 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-54g6p"] Jan 28 19:29:00 crc kubenswrapper[4767]: I0128 19:29:00.881643 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mh4x6\" (UniqueName: \"kubernetes.io/projected/2c5f389e-4cd8-4187-b775-84e8abeff779-kube-api-access-mh4x6\") pod \"community-operators-54g6p\" (UID: \"2c5f389e-4cd8-4187-b775-84e8abeff779\") " pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:00 crc kubenswrapper[4767]: I0128 19:29:00.881887 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c5f389e-4cd8-4187-b775-84e8abeff779-catalog-content\") pod \"community-operators-54g6p\" (UID: \"2c5f389e-4cd8-4187-b775-84e8abeff779\") " pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:00 crc kubenswrapper[4767]: I0128 19:29:00.882190 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c5f389e-4cd8-4187-b775-84e8abeff779-utilities\") pod \"community-operators-54g6p\" (UID: \"2c5f389e-4cd8-4187-b775-84e8abeff779\") " pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:00 crc kubenswrapper[4767]: I0128 19:29:00.985077 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mh4x6\" (UniqueName: \"kubernetes.io/projected/2c5f389e-4cd8-4187-b775-84e8abeff779-kube-api-access-mh4x6\") pod \"community-operators-54g6p\" (UID: \"2c5f389e-4cd8-4187-b775-84e8abeff779\") " pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:00 crc kubenswrapper[4767]: I0128 19:29:00.985221 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c5f389e-4cd8-4187-b775-84e8abeff779-catalog-content\") pod \"community-operators-54g6p\" (UID: \"2c5f389e-4cd8-4187-b775-84e8abeff779\") " pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:00 crc kubenswrapper[4767]: I0128 19:29:00.985321 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c5f389e-4cd8-4187-b775-84e8abeff779-utilities\") pod \"community-operators-54g6p\" (UID: 
\"2c5f389e-4cd8-4187-b775-84e8abeff779\") " pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:00 crc kubenswrapper[4767]: I0128 19:29:00.986130 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c5f389e-4cd8-4187-b775-84e8abeff779-catalog-content\") pod \"community-operators-54g6p\" (UID: \"2c5f389e-4cd8-4187-b775-84e8abeff779\") " pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:00 crc kubenswrapper[4767]: I0128 19:29:00.986229 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c5f389e-4cd8-4187-b775-84e8abeff779-utilities\") pod \"community-operators-54g6p\" (UID: \"2c5f389e-4cd8-4187-b775-84e8abeff779\") " pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:01 crc kubenswrapper[4767]: I0128 19:29:01.019633 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mh4x6\" (UniqueName: \"kubernetes.io/projected/2c5f389e-4cd8-4187-b775-84e8abeff779-kube-api-access-mh4x6\") pod \"community-operators-54g6p\" (UID: \"2c5f389e-4cd8-4187-b775-84e8abeff779\") " pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:01 crc kubenswrapper[4767]: I0128 19:29:01.151305 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:01 crc kubenswrapper[4767]: I0128 19:29:01.768561 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-54g6p"] Jan 28 19:29:01 crc kubenswrapper[4767]: I0128 19:29:01.848511 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54g6p" event={"ID":"2c5f389e-4cd8-4187-b775-84e8abeff779","Type":"ContainerStarted","Data":"6b304b8a7ab50540e30e66a058fe4232af29d95c99067354c6692f575142f0b5"} Jan 28 19:29:02 crc kubenswrapper[4767]: I0128 19:29:02.862536 4767 generic.go:334] "Generic (PLEG): container finished" podID="2c5f389e-4cd8-4187-b775-84e8abeff779" containerID="a291e1d5950397a2b1d8b98f59c4e43e9992887e8d301953aaee33e43e634264" exitCode=0 Jan 28 19:29:02 crc kubenswrapper[4767]: I0128 19:29:02.862616 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54g6p" event={"ID":"2c5f389e-4cd8-4187-b775-84e8abeff779","Type":"ContainerDied","Data":"a291e1d5950397a2b1d8b98f59c4e43e9992887e8d301953aaee33e43e634264"} Jan 28 19:29:02 crc kubenswrapper[4767]: I0128 19:29:02.866540 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 19:29:03 crc kubenswrapper[4767]: I0128 19:29:03.876124 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54g6p" event={"ID":"2c5f389e-4cd8-4187-b775-84e8abeff779","Type":"ContainerStarted","Data":"84b25e7b2519c5fe26564b9647fc8c66b806a163c25e10f0e3b9cbf72c7d8030"} Jan 28 19:29:05 crc kubenswrapper[4767]: I0128 19:29:05.899590 4767 generic.go:334] "Generic (PLEG): container finished" podID="2c5f389e-4cd8-4187-b775-84e8abeff779" containerID="84b25e7b2519c5fe26564b9647fc8c66b806a163c25e10f0e3b9cbf72c7d8030" exitCode=0 Jan 28 19:29:05 crc kubenswrapper[4767]: I0128 19:29:05.899672 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54g6p" 
event={"ID":"2c5f389e-4cd8-4187-b775-84e8abeff779","Type":"ContainerDied","Data":"84b25e7b2519c5fe26564b9647fc8c66b806a163c25e10f0e3b9cbf72c7d8030"} Jan 28 19:29:06 crc kubenswrapper[4767]: I0128 19:29:06.961289 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54g6p" event={"ID":"2c5f389e-4cd8-4187-b775-84e8abeff779","Type":"ContainerStarted","Data":"c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa"} Jan 28 19:29:07 crc kubenswrapper[4767]: I0128 19:29:07.007462 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-54g6p" podStartSLOduration=3.596137397 podStartE2EDuration="7.007441057s" podCreationTimestamp="2026-01-28 19:29:00 +0000 UTC" firstStartedPulling="2026-01-28 19:29:02.866301188 +0000 UTC m=+3548.830484062" lastFinishedPulling="2026-01-28 19:29:06.277604848 +0000 UTC m=+3552.241787722" observedRunningTime="2026-01-28 19:29:06.996634958 +0000 UTC m=+3552.960817832" watchObservedRunningTime="2026-01-28 19:29:07.007441057 +0000 UTC m=+3552.971623931" Jan 28 19:29:11 crc kubenswrapper[4767]: I0128 19:29:11.152128 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:11 crc kubenswrapper[4767]: I0128 19:29:11.152803 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:11 crc kubenswrapper[4767]: I0128 19:29:11.211469 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:12 crc kubenswrapper[4767]: I0128 19:29:12.088605 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:12 crc kubenswrapper[4767]: I0128 19:29:12.163730 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-54g6p"] Jan 28 19:29:14 crc kubenswrapper[4767]: I0128 19:29:14.032405 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-54g6p" podUID="2c5f389e-4cd8-4187-b775-84e8abeff779" containerName="registry-server" containerID="cri-o://c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa" gracePeriod=2 Jan 28 19:29:14 crc kubenswrapper[4767]: I0128 19:29:14.567598 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:14 crc kubenswrapper[4767]: I0128 19:29:14.727521 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c5f389e-4cd8-4187-b775-84e8abeff779-catalog-content\") pod \"2c5f389e-4cd8-4187-b775-84e8abeff779\" (UID: \"2c5f389e-4cd8-4187-b775-84e8abeff779\") " Jan 28 19:29:14 crc kubenswrapper[4767]: I0128 19:29:14.727770 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mh4x6\" (UniqueName: \"kubernetes.io/projected/2c5f389e-4cd8-4187-b775-84e8abeff779-kube-api-access-mh4x6\") pod \"2c5f389e-4cd8-4187-b775-84e8abeff779\" (UID: \"2c5f389e-4cd8-4187-b775-84e8abeff779\") " Jan 28 19:29:14 crc kubenswrapper[4767]: I0128 19:29:14.727824 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c5f389e-4cd8-4187-b775-84e8abeff779-utilities\") pod \"2c5f389e-4cd8-4187-b775-84e8abeff779\" (UID: \"2c5f389e-4cd8-4187-b775-84e8abeff779\") " Jan 28 19:29:14 crc kubenswrapper[4767]: I0128 19:29:14.729229 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c5f389e-4cd8-4187-b775-84e8abeff779-utilities" (OuterVolumeSpecName: "utilities") pod "2c5f389e-4cd8-4187-b775-84e8abeff779" (UID: "2c5f389e-4cd8-4187-b775-84e8abeff779"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:29:14 crc kubenswrapper[4767]: I0128 19:29:14.736683 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c5f389e-4cd8-4187-b775-84e8abeff779-kube-api-access-mh4x6" (OuterVolumeSpecName: "kube-api-access-mh4x6") pod "2c5f389e-4cd8-4187-b775-84e8abeff779" (UID: "2c5f389e-4cd8-4187-b775-84e8abeff779"). InnerVolumeSpecName "kube-api-access-mh4x6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:29:14 crc kubenswrapper[4767]: I0128 19:29:14.831921 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mh4x6\" (UniqueName: \"kubernetes.io/projected/2c5f389e-4cd8-4187-b775-84e8abeff779-kube-api-access-mh4x6\") on node \"crc\" DevicePath \"\"" Jan 28 19:29:14 crc kubenswrapper[4767]: I0128 19:29:14.832248 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c5f389e-4cd8-4187-b775-84e8abeff779-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.075947 4767 generic.go:334] "Generic (PLEG): container finished" podID="2c5f389e-4cd8-4187-b775-84e8abeff779" containerID="c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa" exitCode=0 Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.076049 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54g6p" event={"ID":"2c5f389e-4cd8-4187-b775-84e8abeff779","Type":"ContainerDied","Data":"c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa"} Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.076094 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-54g6p" event={"ID":"2c5f389e-4cd8-4187-b775-84e8abeff779","Type":"ContainerDied","Data":"6b304b8a7ab50540e30e66a058fe4232af29d95c99067354c6692f575142f0b5"} Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.076125 4767 scope.go:117] "RemoveContainer" containerID="c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.076301 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-54g6p" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.108550 4767 scope.go:117] "RemoveContainer" containerID="84b25e7b2519c5fe26564b9647fc8c66b806a163c25e10f0e3b9cbf72c7d8030" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.131961 4767 scope.go:117] "RemoveContainer" containerID="a291e1d5950397a2b1d8b98f59c4e43e9992887e8d301953aaee33e43e634264" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.144597 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c5f389e-4cd8-4187-b775-84e8abeff779-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2c5f389e-4cd8-4187-b775-84e8abeff779" (UID: "2c5f389e-4cd8-4187-b775-84e8abeff779"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.189804 4767 scope.go:117] "RemoveContainer" containerID="c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa" Jan 28 19:29:15 crc kubenswrapper[4767]: E0128 19:29:15.190511 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa\": container with ID starting with c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa not found: ID does not exist" containerID="c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.190549 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa"} err="failed to get container status \"c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa\": rpc error: code = NotFound desc = could not find container \"c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa\": container with ID starting with c1701eaa76d68e37fd0813cf9eb992daaef4125c6bac295591671d77121bdcfa not found: ID does not exist" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.190597 4767 scope.go:117] "RemoveContainer" containerID="84b25e7b2519c5fe26564b9647fc8c66b806a163c25e10f0e3b9cbf72c7d8030" Jan 28 19:29:15 crc kubenswrapper[4767]: E0128 19:29:15.191095 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84b25e7b2519c5fe26564b9647fc8c66b806a163c25e10f0e3b9cbf72c7d8030\": container with ID starting with 84b25e7b2519c5fe26564b9647fc8c66b806a163c25e10f0e3b9cbf72c7d8030 not found: ID does not exist" containerID="84b25e7b2519c5fe26564b9647fc8c66b806a163c25e10f0e3b9cbf72c7d8030" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.191147 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84b25e7b2519c5fe26564b9647fc8c66b806a163c25e10f0e3b9cbf72c7d8030"} err="failed to get container status \"84b25e7b2519c5fe26564b9647fc8c66b806a163c25e10f0e3b9cbf72c7d8030\": rpc error: code = NotFound desc = could not find container \"84b25e7b2519c5fe26564b9647fc8c66b806a163c25e10f0e3b9cbf72c7d8030\": container with ID starting with 84b25e7b2519c5fe26564b9647fc8c66b806a163c25e10f0e3b9cbf72c7d8030 not found: ID does not exist" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.191167 4767 scope.go:117] "RemoveContainer" containerID="a291e1d5950397a2b1d8b98f59c4e43e9992887e8d301953aaee33e43e634264" Jan 28 19:29:15 crc kubenswrapper[4767]: E0128 19:29:15.191469 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a291e1d5950397a2b1d8b98f59c4e43e9992887e8d301953aaee33e43e634264\": container with ID starting with a291e1d5950397a2b1d8b98f59c4e43e9992887e8d301953aaee33e43e634264 not found: ID does not exist" containerID="a291e1d5950397a2b1d8b98f59c4e43e9992887e8d301953aaee33e43e634264" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.191499 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a291e1d5950397a2b1d8b98f59c4e43e9992887e8d301953aaee33e43e634264"} err="failed to get container status \"a291e1d5950397a2b1d8b98f59c4e43e9992887e8d301953aaee33e43e634264\": rpc error: code = NotFound desc = could not 
find container \"a291e1d5950397a2b1d8b98f59c4e43e9992887e8d301953aaee33e43e634264\": container with ID starting with a291e1d5950397a2b1d8b98f59c4e43e9992887e8d301953aaee33e43e634264 not found: ID does not exist" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.244749 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c5f389e-4cd8-4187-b775-84e8abeff779-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.419140 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-54g6p"] Jan 28 19:29:15 crc kubenswrapper[4767]: I0128 19:29:15.431292 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-54g6p"] Jan 28 19:29:16 crc kubenswrapper[4767]: I0128 19:29:16.811751 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c5f389e-4cd8-4187-b775-84e8abeff779" path="/var/lib/kubelet/pods/2c5f389e-4cd8-4187-b775-84e8abeff779/volumes" Jan 28 19:29:35 crc kubenswrapper[4767]: I0128 19:29:35.050054 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-create-4dn5d"] Jan 28 19:29:35 crc kubenswrapper[4767]: I0128 19:29:35.065025 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-1000-account-create-update-qv25f"] Jan 28 19:29:35 crc kubenswrapper[4767]: I0128 19:29:35.078162 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-create-4dn5d"] Jan 28 19:29:35 crc kubenswrapper[4767]: I0128 19:29:35.090888 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-1000-account-create-update-qv25f"] Jan 28 19:29:36 crc kubenswrapper[4767]: I0128 19:29:36.810637 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4174c5fe-9a20-41cf-b261-272c0c99734f" path="/var/lib/kubelet/pods/4174c5fe-9a20-41cf-b261-272c0c99734f/volumes" Jan 28 19:29:36 crc kubenswrapper[4767]: I0128 19:29:36.813298 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b83f436b-cd7e-4591-be88-69f803dde429" path="/var/lib/kubelet/pods/b83f436b-cd7e-4591-be88-69f803dde429/volumes" Jan 28 19:29:46 crc kubenswrapper[4767]: I0128 19:29:46.037250 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-db-sync-trb59"] Jan 28 19:29:46 crc kubenswrapper[4767]: I0128 19:29:46.050309 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-db-sync-trb59"] Jan 28 19:29:46 crc kubenswrapper[4767]: I0128 19:29:46.832704 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d099bfa1-60e9-4edd-8457-d3825393e7d0" path="/var/lib/kubelet/pods/d099bfa1-60e9-4edd-8457-d3825393e7d0/volumes" Jan 28 19:29:53 crc kubenswrapper[4767]: I0128 19:29:53.723333 4767 scope.go:117] "RemoveContainer" containerID="b6930c79f40b830058e6ca30ae1f47807208a7f869e6d15809859bc7a5a28f1b" Jan 28 19:29:53 crc kubenswrapper[4767]: I0128 19:29:53.756725 4767 scope.go:117] "RemoveContainer" containerID="eed5a0c7aa341e0be51fdb9cb9bc7d01dad5bb689cfb7d7fb6388ef6ae4966c4" Jan 28 19:29:53 crc kubenswrapper[4767]: I0128 19:29:53.827346 4767 scope.go:117] "RemoveContainer" containerID="f02ce9d0c1fed72b255ca5cefc86570c6a36859d50d5613b3e6096cb76d44929" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.159479 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t"] Jan 28 19:30:00 crc 
kubenswrapper[4767]: E0128 19:30:00.160757 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c5f389e-4cd8-4187-b775-84e8abeff779" containerName="extract-utilities" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.160864 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c5f389e-4cd8-4187-b775-84e8abeff779" containerName="extract-utilities" Jan 28 19:30:00 crc kubenswrapper[4767]: E0128 19:30:00.160891 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c5f389e-4cd8-4187-b775-84e8abeff779" containerName="extract-content" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.160899 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c5f389e-4cd8-4187-b775-84e8abeff779" containerName="extract-content" Jan 28 19:30:00 crc kubenswrapper[4767]: E0128 19:30:00.160923 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c5f389e-4cd8-4187-b775-84e8abeff779" containerName="registry-server" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.160931 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c5f389e-4cd8-4187-b775-84e8abeff779" containerName="registry-server" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.161173 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c5f389e-4cd8-4187-b775-84e8abeff779" containerName="registry-server" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.162243 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.165328 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.166134 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.174261 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t"] Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.317018 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2fbd2a48-81a8-4010-b325-fd4361dde8e5-secret-volume\") pod \"collect-profiles-29493810-ttr9t\" (UID: \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.317084 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqxrg\" (UniqueName: \"kubernetes.io/projected/2fbd2a48-81a8-4010-b325-fd4361dde8e5-kube-api-access-kqxrg\") pod \"collect-profiles-29493810-ttr9t\" (UID: \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.317137 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2fbd2a48-81a8-4010-b325-fd4361dde8e5-config-volume\") pod \"collect-profiles-29493810-ttr9t\" (UID: \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" Jan 28 19:30:00 crc 
kubenswrapper[4767]: I0128 19:30:00.419465 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2fbd2a48-81a8-4010-b325-fd4361dde8e5-config-volume\") pod \"collect-profiles-29493810-ttr9t\" (UID: \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.419716 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2fbd2a48-81a8-4010-b325-fd4361dde8e5-secret-volume\") pod \"collect-profiles-29493810-ttr9t\" (UID: \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.419744 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqxrg\" (UniqueName: \"kubernetes.io/projected/2fbd2a48-81a8-4010-b325-fd4361dde8e5-kube-api-access-kqxrg\") pod \"collect-profiles-29493810-ttr9t\" (UID: \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.420808 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2fbd2a48-81a8-4010-b325-fd4361dde8e5-config-volume\") pod \"collect-profiles-29493810-ttr9t\" (UID: \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.435032 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2fbd2a48-81a8-4010-b325-fd4361dde8e5-secret-volume\") pod \"collect-profiles-29493810-ttr9t\" (UID: \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.447407 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqxrg\" (UniqueName: \"kubernetes.io/projected/2fbd2a48-81a8-4010-b325-fd4361dde8e5-kube-api-access-kqxrg\") pod \"collect-profiles-29493810-ttr9t\" (UID: \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.496289 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" Jan 28 19:30:00 crc kubenswrapper[4767]: I0128 19:30:00.985938 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t"] Jan 28 19:30:01 crc kubenswrapper[4767]: I0128 19:30:01.559587 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" event={"ID":"2fbd2a48-81a8-4010-b325-fd4361dde8e5","Type":"ContainerStarted","Data":"f22e0dadba843767c0def821b4c5a1f9e6140262163119b2dc103847211a5aac"} Jan 28 19:30:01 crc kubenswrapper[4767]: I0128 19:30:01.559636 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" event={"ID":"2fbd2a48-81a8-4010-b325-fd4361dde8e5","Type":"ContainerStarted","Data":"862db349ccb66153274e5749c4177950830f1854cd7c2bc385e609675178ba32"} Jan 28 19:30:01 crc kubenswrapper[4767]: I0128 19:30:01.586032 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" podStartSLOduration=1.586010997 podStartE2EDuration="1.586010997s" podCreationTimestamp="2026-01-28 19:30:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 19:30:01.576168447 +0000 UTC m=+3607.540351341" watchObservedRunningTime="2026-01-28 19:30:01.586010997 +0000 UTC m=+3607.550193871" Jan 28 19:30:02 crc kubenswrapper[4767]: I0128 19:30:02.580485 4767 generic.go:334] "Generic (PLEG): container finished" podID="2fbd2a48-81a8-4010-b325-fd4361dde8e5" containerID="f22e0dadba843767c0def821b4c5a1f9e6140262163119b2dc103847211a5aac" exitCode=0 Jan 28 19:30:02 crc kubenswrapper[4767]: I0128 19:30:02.581026 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" event={"ID":"2fbd2a48-81a8-4010-b325-fd4361dde8e5","Type":"ContainerDied","Data":"f22e0dadba843767c0def821b4c5a1f9e6140262163119b2dc103847211a5aac"} Jan 28 19:30:03 crc kubenswrapper[4767]: I0128 19:30:03.989286 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t"
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.107474 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqxrg\" (UniqueName: \"kubernetes.io/projected/2fbd2a48-81a8-4010-b325-fd4361dde8e5-kube-api-access-kqxrg\") pod \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\" (UID: \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\") "
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.108180 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2fbd2a48-81a8-4010-b325-fd4361dde8e5-config-volume\") pod \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\" (UID: \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\") "
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.108264 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2fbd2a48-81a8-4010-b325-fd4361dde8e5-secret-volume\") pod \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\" (UID: \"2fbd2a48-81a8-4010-b325-fd4361dde8e5\") "
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.109801 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fbd2a48-81a8-4010-b325-fd4361dde8e5-config-volume" (OuterVolumeSpecName: "config-volume") pod "2fbd2a48-81a8-4010-b325-fd4361dde8e5" (UID: "2fbd2a48-81a8-4010-b325-fd4361dde8e5"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.115753 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fbd2a48-81a8-4010-b325-fd4361dde8e5-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "2fbd2a48-81a8-4010-b325-fd4361dde8e5" (UID: "2fbd2a48-81a8-4010-b325-fd4361dde8e5"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.120978 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fbd2a48-81a8-4010-b325-fd4361dde8e5-kube-api-access-kqxrg" (OuterVolumeSpecName: "kube-api-access-kqxrg") pod "2fbd2a48-81a8-4010-b325-fd4361dde8e5" (UID: "2fbd2a48-81a8-4010-b325-fd4361dde8e5"). InnerVolumeSpecName "kube-api-access-kqxrg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.212001 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqxrg\" (UniqueName: \"kubernetes.io/projected/2fbd2a48-81a8-4010-b325-fd4361dde8e5-kube-api-access-kqxrg\") on node \"crc\" DevicePath \"\""
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.212059 4767 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2fbd2a48-81a8-4010-b325-fd4361dde8e5-config-volume\") on node \"crc\" DevicePath \"\""
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.212069 4767 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/2fbd2a48-81a8-4010-b325-fd4361dde8e5-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.604971 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t" event={"ID":"2fbd2a48-81a8-4010-b325-fd4361dde8e5","Type":"ContainerDied","Data":"862db349ccb66153274e5749c4177950830f1854cd7c2bc385e609675178ba32"}
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.605241 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="862db349ccb66153274e5749c4177950830f1854cd7c2bc385e609675178ba32"
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.605051 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493810-ttr9t"
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.666628 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl"]
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.679865 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493765-pwhjl"]
Jan 28 19:30:04 crc kubenswrapper[4767]: I0128 19:30:04.810156 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f42ec511-1853-412d-930e-8908b4ca7619" path="/var/lib/kubelet/pods/f42ec511-1853-412d-930e-8908b4ca7619/volumes"
Jan 28 19:30:15 crc kubenswrapper[4767]: I0128 19:30:15.454957 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 19:30:15 crc kubenswrapper[4767]: I0128 19:30:15.455562 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 19:30:45 crc kubenswrapper[4767]: I0128 19:30:45.456326 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 19:30:45 crc kubenswrapper[4767]: I0128 19:30:45.457068 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 19:30:53 crc kubenswrapper[4767]: I0128 19:30:53.990000 4767 scope.go:117] "RemoveContainer" containerID="7ebd66cd982a4c7988ab09f7878b28346c5a8c818a257bc93d6637397e86f2ac"
Jan 28 19:31:15 crc kubenswrapper[4767]: I0128 19:31:15.455355 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 28 19:31:15 crc kubenswrapper[4767]: I0128 19:31:15.456262 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 28 19:31:15 crc kubenswrapper[4767]: I0128 19:31:15.456350 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp"
Jan 28 19:31:15 crc kubenswrapper[4767]: I0128 19:31:15.457491 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 28 19:31:15 crc kubenswrapper[4767]: I0128 19:31:15.457567 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea" gracePeriod=600
Jan 28 19:31:15 crc kubenswrapper[4767]: E0128 19:31:15.595924 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:31:16 crc kubenswrapper[4767]: I0128 19:31:16.390952 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea" exitCode=0
Jan 28 19:31:16 crc kubenswrapper[4767]: I0128 19:31:16.390997 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"}
Jan 28 19:31:16 crc kubenswrapper[4767]: I0128 19:31:16.391330 4767 scope.go:117] "RemoveContainer" containerID="34ec71217a6b006a3a33c4eb869cd5a266f9db72104e528bff51b018d9e58d57"
Jan 28 19:31:16 crc kubenswrapper[4767]: I0128 19:31:16.392688 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:31:16 crc kubenswrapper[4767]: E0128 19:31:16.393480 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:31:21 crc kubenswrapper[4767]: I0128 19:31:21.128060 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-79f6f75b9c-dhf5c_0d7c50d3-1348-43e5-a8fa-f05cd53d2a42/manager/0.log"
Jan 28 19:31:22 crc kubenswrapper[4767]: I0128 19:31:22.523911 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"]
Jan 28 19:31:22 crc kubenswrapper[4767]: I0128 19:31:22.524871 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-listener" containerID="cri-o://af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e" gracePeriod=30
Jan 28 19:31:22 crc kubenswrapper[4767]: I0128 19:31:22.524900 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-evaluator" containerID="cri-o://e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f" gracePeriod=30
Jan 28 19:31:22 crc kubenswrapper[4767]: I0128 19:31:22.524904 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-notifier" containerID="cri-o://bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52" gracePeriod=30
Jan 28 19:31:22 crc kubenswrapper[4767]: I0128 19:31:22.525032 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/aodh-0" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-api" containerID="cri-o://2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89" gracePeriod=30
Jan 28 19:31:23 crc kubenswrapper[4767]: I0128 19:31:23.500475 4767 generic.go:334] "Generic (PLEG): container finished" podID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerID="e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f" exitCode=0
Jan 28 19:31:23 crc kubenswrapper[4767]: I0128 19:31:23.501021 4767 generic.go:334] "Generic (PLEG): container finished" podID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerID="2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89" exitCode=0
Jan 28 19:31:23 crc kubenswrapper[4767]: I0128 19:31:23.500520 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3cab4d05-5139-401e-b6f1-9818d4b7fe85","Type":"ContainerDied","Data":"e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f"}
Jan 28 19:31:23 crc kubenswrapper[4767]: I0128 19:31:23.501095 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3cab4d05-5139-401e-b6f1-9818d4b7fe85","Type":"ContainerDied","Data":"2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89"}
Jan 28 19:31:26 crc kubenswrapper[4767]: I0128 19:31:26.536039 4767 generic.go:334] "Generic (PLEG): container finished" podID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerID="bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52" exitCode=0
Jan 28 19:31:26 crc kubenswrapper[4767]: I0128 19:31:26.536136 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3cab4d05-5139-401e-b6f1-9818d4b7fe85","Type":"ContainerDied","Data":"bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52"}
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.364585 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.435571 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-public-tls-certs\") pod \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") "
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.435681 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-combined-ca-bundle\") pod \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") "
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.435749 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-internal-tls-certs\") pod \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") "
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.435954 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-config-data\") pod \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") "
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.435999 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6qsgx\" (UniqueName: \"kubernetes.io/projected/3cab4d05-5139-401e-b6f1-9818d4b7fe85-kube-api-access-6qsgx\") pod \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") "
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.436034 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-scripts\") pod \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\" (UID: \"3cab4d05-5139-401e-b6f1-9818d4b7fe85\") "
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.454635 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cab4d05-5139-401e-b6f1-9818d4b7fe85-kube-api-access-6qsgx" (OuterVolumeSpecName: "kube-api-access-6qsgx") pod "3cab4d05-5139-401e-b6f1-9818d4b7fe85" (UID: "3cab4d05-5139-401e-b6f1-9818d4b7fe85"). InnerVolumeSpecName "kube-api-access-6qsgx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.458579 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-scripts" (OuterVolumeSpecName: "scripts") pod "3cab4d05-5139-401e-b6f1-9818d4b7fe85" (UID: "3cab4d05-5139-401e-b6f1-9818d4b7fe85"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.530659 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3cab4d05-5139-401e-b6f1-9818d4b7fe85" (UID: "3cab4d05-5139-401e-b6f1-9818d4b7fe85"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.586788 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3cab4d05-5139-401e-b6f1-9818d4b7fe85" (UID: "3cab4d05-5139-401e-b6f1-9818d4b7fe85"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.600731 4767 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.600789 4767 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.600801 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6qsgx\" (UniqueName: \"kubernetes.io/projected/3cab4d05-5139-401e-b6f1-9818d4b7fe85-kube-api-access-6qsgx\") on node \"crc\" DevicePath \"\""
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.600819 4767 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-scripts\") on node \"crc\" DevicePath \"\""
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.613798 4767 generic.go:334] "Generic (PLEG): container finished" podID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerID="af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e" exitCode=0
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.613898 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3cab4d05-5139-401e-b6f1-9818d4b7fe85","Type":"ContainerDied","Data":"af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e"}
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.613960 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"3cab4d05-5139-401e-b6f1-9818d4b7fe85","Type":"ContainerDied","Data":"7c5382633339b38387197be2da69c52ee54bfeb574c04937603fc3803aef0118"}
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.613983 4767 scope.go:117] "RemoveContainer" containerID="af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.614327 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.652991 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3cab4d05-5139-401e-b6f1-9818d4b7fe85" (UID: "3cab4d05-5139-401e-b6f1-9818d4b7fe85"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.673263 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-config-data" (OuterVolumeSpecName: "config-data") pod "3cab4d05-5139-401e-b6f1-9818d4b7fe85" (UID: "3cab4d05-5139-401e-b6f1-9818d4b7fe85"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.703884 4767 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.703946 4767 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3cab4d05-5139-401e-b6f1-9818d4b7fe85-config-data\") on node \"crc\" DevicePath \"\""
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.760113 4767 scope.go:117] "RemoveContainer" containerID="bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.797616 4767 scope.go:117] "RemoveContainer" containerID="e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.822411 4767 scope.go:117] "RemoveContainer" containerID="2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.845027 4767 scope.go:117] "RemoveContainer" containerID="af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e"
Jan 28 19:31:27 crc kubenswrapper[4767]: E0128 19:31:27.845659 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e\": container with ID starting with af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e not found: ID does not exist" containerID="af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.845720 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e"} err="failed to get container status \"af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e\": rpc error: code = NotFound desc = could not find container \"af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e\": container with ID starting with af36ffbaa23bc4ae25738e98666cc22044b6889a9018e4cdcc07a196e2ec292e not found: ID does not exist"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.845741 4767 scope.go:117] "RemoveContainer" containerID="bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52"
Jan 28 19:31:27 crc kubenswrapper[4767]: E0128 19:31:27.846069 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52\": container with ID starting with bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52 not found: ID does not exist" containerID="bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.846099 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52"} err="failed to get container status \"bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52\": rpc error: code = NotFound desc = could not find container \"bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52\": container with ID starting with bd90c03b3885909dcb87fbe4cc475c5e9a42a17b67650ea3e6a88c72d1b05b52 not found: ID does not exist"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.846115 4767 scope.go:117] "RemoveContainer" containerID="e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f"
Jan 28 19:31:27 crc kubenswrapper[4767]: E0128 19:31:27.846599 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f\": container with ID starting with e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f not found: ID does not exist" containerID="e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.846623 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f"} err="failed to get container status \"e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f\": rpc error: code = NotFound desc = could not find container \"e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f\": container with ID starting with e1e98503149cfa16ccf365394a2430b30a9eda3c1a2f20e77ba221a0a5b32b0f not found: ID does not exist"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.846639 4767 scope.go:117] "RemoveContainer" containerID="2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89"
Jan 28 19:31:27 crc kubenswrapper[4767]: E0128 19:31:27.846911 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89\": container with ID starting with 2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89 not found: ID does not exist" containerID="2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.846945 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89"} err="failed to get container status \"2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89\": rpc error: code = NotFound desc = could not find container \"2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89\": container with ID starting with 2d7f6945d0f89727ee070275451bff66e6b3c542be07949d49b792fcb5329d89 not found: ID does not exist"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.961075 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/aodh-0"]
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.974472 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/aodh-0"]
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.988286 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/aodh-0"]
Jan 28 19:31:27 crc kubenswrapper[4767]: E0128 19:31:27.988940 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-notifier"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.988964 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-notifier"
Jan 28 19:31:27 crc kubenswrapper[4767]: E0128 19:31:27.988980 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-evaluator"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.988988 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-evaluator"
Jan 28 19:31:27 crc kubenswrapper[4767]: E0128 19:31:27.989006 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fbd2a48-81a8-4010-b325-fd4361dde8e5" containerName="collect-profiles"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.989012 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fbd2a48-81a8-4010-b325-fd4361dde8e5" containerName="collect-profiles"
Jan 28 19:31:27 crc kubenswrapper[4767]: E0128 19:31:27.989023 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-listener"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.989029 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-listener"
Jan 28 19:31:27 crc kubenswrapper[4767]: E0128 19:31:27.989046 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-api"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.989051 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-api"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.989314 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-evaluator"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.989340 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-api"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.989352 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-listener"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.989366 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fbd2a48-81a8-4010-b325-fd4361dde8e5" containerName="collect-profiles"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.989382 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" containerName="aodh-notifier"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.991427 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.994323 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-scripts"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.994684 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-public-svc"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.994945 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-aodh-internal-svc"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.995096 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"aodh-config-data"
Jan 28 19:31:27 crc kubenswrapper[4767]: I0128 19:31:27.995230 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-autoscaling-dockercfg-chl48"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.008583 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.115732 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-internal-tls-certs\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.116187 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-combined-ca-bundle\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.116260 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-public-tls-certs\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.116391 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pznvf\" (UniqueName: \"kubernetes.io/projected/5367cd99-a28a-4678-9add-df5bf4e069fb-kube-api-access-pznvf\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.116490 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-config-data\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.116748 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-scripts\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.218799 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-scripts\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.218871 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-internal-tls-certs\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.218954 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-combined-ca-bundle\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.218976 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-public-tls-certs\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.219018 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pznvf\" (UniqueName: \"kubernetes.io/projected/5367cd99-a28a-4678-9add-df5bf4e069fb-kube-api-access-pznvf\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.219081 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-config-data\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.223825 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-scripts\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.224318 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-public-tls-certs\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.224413 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-internal-tls-certs\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.224834 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-config-data\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.226542 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5367cd99-a28a-4678-9add-df5bf4e069fb-combined-ca-bundle\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.240642 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pznvf\" (UniqueName: \"kubernetes.io/projected/5367cd99-a28a-4678-9add-df5bf4e069fb-kube-api-access-pznvf\") pod \"aodh-0\" (UID: \"5367cd99-a28a-4678-9add-df5bf4e069fb\") " pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.319442 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/aodh-0"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.814132 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cab4d05-5139-401e-b6f1-9818d4b7fe85" path="/var/lib/kubelet/pods/3cab4d05-5139-401e-b6f1-9818d4b7fe85/volumes"
Jan 28 19:31:28 crc kubenswrapper[4767]: I0128 19:31:28.817852 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/aodh-0"]
Jan 28 19:31:29 crc kubenswrapper[4767]: I0128 19:31:29.640436 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"5367cd99-a28a-4678-9add-df5bf4e069fb","Type":"ContainerStarted","Data":"e0427c8fc14ae8d7f467cf308a2b1a5a9791b3ae5a0b301773ed8617d334c60b"}
Jan 28 19:31:29 crc kubenswrapper[4767]: I0128 19:31:29.640921 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"5367cd99-a28a-4678-9add-df5bf4e069fb","Type":"ContainerStarted","Data":"bf46604c7c61fe8303a27d870307a403579e3c995289fd2ee922f28966cccfbf"}
Jan 28 19:31:29 crc kubenswrapper[4767]: I0128 19:31:29.796405 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:31:29 crc kubenswrapper[4767]: E0128 19:31:29.796704 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:31:31 crc kubenswrapper[4767]: I0128 19:31:31.669339 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"5367cd99-a28a-4678-9add-df5bf4e069fb","Type":"ContainerStarted","Data":"581c9ff7d07ab73c72591870d92485f081fc2cafbeaa681e1b08cc7c048343a2"}
Jan 28 19:31:31 crc kubenswrapper[4767]: I0128 19:31:31.669858 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"5367cd99-a28a-4678-9add-df5bf4e069fb","Type":"ContainerStarted","Data":"289762d9704dc7d94573b8f3cd71f05f504e797024257be030623ff2573a8341"}
Jan 28 19:31:32 crc kubenswrapper[4767]: I0128 19:31:32.683284 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/aodh-0" event={"ID":"5367cd99-a28a-4678-9add-df5bf4e069fb","Type":"ContainerStarted","Data":"b66eb65fdd3b837bac2ecad77ee6c510fd72a2960c245a3e4dab8d273d87ce31"}
Jan 28 19:31:32 crc kubenswrapper[4767]: I0128 19:31:32.706533 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/aodh-0" podStartSLOduration=2.271159881 podStartE2EDuration="5.706505467s" podCreationTimestamp="2026-01-28 19:31:27 +0000 UTC" firstStartedPulling="2026-01-28 19:31:28.813963234 +0000 UTC m=+3694.778146108" lastFinishedPulling="2026-01-28 19:31:32.24930882 +0000 UTC m=+3698.213491694" observedRunningTime="2026-01-28 19:31:32.706431094 +0000 UTC m=+3698.670613958" watchObservedRunningTime="2026-01-28 19:31:32.706505467 +0000 UTC m=+3698.670688351"
Jan 28 19:31:42 crc kubenswrapper[4767]: I0128 19:31:42.795914 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:31:42 crc kubenswrapper[4767]: E0128 19:31:42.797351 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:31:57 crc kubenswrapper[4767]: I0128 19:31:57.796233 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:31:57 crc kubenswrapper[4767]: E0128 19:31:57.797200 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:32:10 crc kubenswrapper[4767]: I0128 19:32:10.797666 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:32:10 crc kubenswrapper[4767]: E0128 19:32:10.798787 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:32:23 crc kubenswrapper[4767]: I0128 19:32:23.795696 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:32:23 crc kubenswrapper[4767]: E0128 19:32:23.796737 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:32:35 crc kubenswrapper[4767]: I0128 19:32:35.796245 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:32:35 crc kubenswrapper[4767]: E0128 19:32:35.797009 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:32:50 crc kubenswrapper[4767]: I0128 19:32:50.797084 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:32:50 crc kubenswrapper[4767]: E0128 19:32:50.798318 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:33:03 crc kubenswrapper[4767]: I0128 19:33:03.795482 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:33:03 crc kubenswrapper[4767]: E0128 19:33:03.797276 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:33:15 crc kubenswrapper[4767]: I0128 19:33:15.796341 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:33:15 crc kubenswrapper[4767]: E0128 19:33:15.797686 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:33:28 crc kubenswrapper[4767]: I0128 19:33:28.797509 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:33:28 crc kubenswrapper[4767]: E0128 19:33:28.798322 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:33:43 crc kubenswrapper[4767]: I0128 19:33:43.796704 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:33:43 crc kubenswrapper[4767]: E0128 19:33:43.797629 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:33:57 crc kubenswrapper[4767]: I0128 19:33:57.796312 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:33:57 crc kubenswrapper[4767]: E0128 19:33:57.797147 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:34:00 crc kubenswrapper[4767]: I0128 19:34:00.866530 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fbd6w"]
Jan 28 19:34:00 crc kubenswrapper[4767]: I0128 19:34:00.900199 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fbd6w"
Jan 28 19:34:00 crc kubenswrapper[4767]: I0128 19:34:00.900879 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fbd6w"]
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.038863 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5qhzt\" (UniqueName: \"kubernetes.io/projected/ae199241-8747-4750-ae65-b1794a14d8f2-kube-api-access-5qhzt\") pod \"redhat-marketplace-fbd6w\" (UID: \"ae199241-8747-4750-ae65-b1794a14d8f2\") " pod="openshift-marketplace/redhat-marketplace-fbd6w"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.038944 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae199241-8747-4750-ae65-b1794a14d8f2-utilities\") pod \"redhat-marketplace-fbd6w\" (UID: \"ae199241-8747-4750-ae65-b1794a14d8f2\") " pod="openshift-marketplace/redhat-marketplace-fbd6w"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.039113 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae199241-8747-4750-ae65-b1794a14d8f2-catalog-content\") pod \"redhat-marketplace-fbd6w\" (UID: \"ae199241-8747-4750-ae65-b1794a14d8f2\") " pod="openshift-marketplace/redhat-marketplace-fbd6w"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.052638 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rntrt"]
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.055727 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.070408 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rntrt"]
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.141989 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5qhzt\" (UniqueName: \"kubernetes.io/projected/ae199241-8747-4750-ae65-b1794a14d8f2-kube-api-access-5qhzt\") pod \"redhat-marketplace-fbd6w\" (UID: \"ae199241-8747-4750-ae65-b1794a14d8f2\") " pod="openshift-marketplace/redhat-marketplace-fbd6w"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.142054 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae199241-8747-4750-ae65-b1794a14d8f2-utilities\") pod \"redhat-marketplace-fbd6w\" (UID: \"ae199241-8747-4750-ae65-b1794a14d8f2\") " pod="openshift-marketplace/redhat-marketplace-fbd6w"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.142228 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9159f2-6e04-48b0-b645-cc0b516143f5-catalog-content\") pod \"redhat-operators-rntrt\" (UID: \"2f9159f2-6e04-48b0-b645-cc0b516143f5\") " pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.142302 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae199241-8747-4750-ae65-b1794a14d8f2-catalog-content\") pod \"redhat-marketplace-fbd6w\" (UID: \"ae199241-8747-4750-ae65-b1794a14d8f2\") " pod="openshift-marketplace/redhat-marketplace-fbd6w"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.142350 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wlqgb\" (UniqueName: \"kubernetes.io/projected/2f9159f2-6e04-48b0-b645-cc0b516143f5-kube-api-access-wlqgb\") pod \"redhat-operators-rntrt\" (UID: \"2f9159f2-6e04-48b0-b645-cc0b516143f5\") " pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.142418 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9159f2-6e04-48b0-b645-cc0b516143f5-utilities\") pod \"redhat-operators-rntrt\" (UID: \"2f9159f2-6e04-48b0-b645-cc0b516143f5\") " pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.142939 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae199241-8747-4750-ae65-b1794a14d8f2-catalog-content\") pod \"redhat-marketplace-fbd6w\" (UID: \"ae199241-8747-4750-ae65-b1794a14d8f2\") " pod="openshift-marketplace/redhat-marketplace-fbd6w"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.143059 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae199241-8747-4750-ae65-b1794a14d8f2-utilities\") pod \"redhat-marketplace-fbd6w\" (UID: \"ae199241-8747-4750-ae65-b1794a14d8f2\") " pod="openshift-marketplace/redhat-marketplace-fbd6w"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.164514 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5qhzt\" (UniqueName: \"kubernetes.io/projected/ae199241-8747-4750-ae65-b1794a14d8f2-kube-api-access-5qhzt\") pod \"redhat-marketplace-fbd6w\" (UID: \"ae199241-8747-4750-ae65-b1794a14d8f2\") " pod="openshift-marketplace/redhat-marketplace-fbd6w"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.231413 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fbd6w"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.245173 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9159f2-6e04-48b0-b645-cc0b516143f5-catalog-content\") pod \"redhat-operators-rntrt\" (UID: \"2f9159f2-6e04-48b0-b645-cc0b516143f5\") " pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.245658 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wlqgb\" (UniqueName: \"kubernetes.io/projected/2f9159f2-6e04-48b0-b645-cc0b516143f5-kube-api-access-wlqgb\") pod \"redhat-operators-rntrt\" (UID: \"2f9159f2-6e04-48b0-b645-cc0b516143f5\") " pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.245773 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9159f2-6e04-48b0-b645-cc0b516143f5-catalog-content\") pod \"redhat-operators-rntrt\" (UID: \"2f9159f2-6e04-48b0-b645-cc0b516143f5\") " pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.245941 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9159f2-6e04-48b0-b645-cc0b516143f5-utilities\") pod \"redhat-operators-rntrt\" (UID: \"2f9159f2-6e04-48b0-b645-cc0b516143f5\") " pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.246288 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9159f2-6e04-48b0-b645-cc0b516143f5-utilities\") pod \"redhat-operators-rntrt\" (UID: \"2f9159f2-6e04-48b0-b645-cc0b516143f5\") " pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.271805 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wlqgb\" (UniqueName: \"kubernetes.io/projected/2f9159f2-6e04-48b0-b645-cc0b516143f5-kube-api-access-wlqgb\") pod \"redhat-operators-rntrt\" (UID: \"2f9159f2-6e04-48b0-b645-cc0b516143f5\") " pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.386275 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.866380 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fbd6w"]
Jan 28 19:34:01 crc kubenswrapper[4767]: I0128 19:34:01.987837 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rntrt"]
Jan 28 19:34:02 crc kubenswrapper[4767]: I0128 19:34:02.342186 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fbd6w" event={"ID":"ae199241-8747-4750-ae65-b1794a14d8f2","Type":"ContainerStarted","Data":"955903ceb671f87018620ac77fd3cb39a8ceaa17fec0ee6db461e8fac14d25b4"}
Jan 28 19:34:02 crc kubenswrapper[4767]: I0128 19:34:02.342888 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fbd6w" event={"ID":"ae199241-8747-4750-ae65-b1794a14d8f2","Type":"ContainerStarted","Data":"c2b3dc8102c8cb4bb8fcfdcb275153920a627ae9eef66910cdda59dc144776e2"}
Jan 28 19:34:02 crc kubenswrapper[4767]: I0128 19:34:02.347000 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rntrt" event={"ID":"2f9159f2-6e04-48b0-b645-cc0b516143f5","Type":"ContainerStarted","Data":"5a621f26b95bf1eaed59e334a985ceebfc5cea4429f3046e826638c3a50b2a8c"}
Jan 28 19:34:02 crc kubenswrapper[4767]: I0128 19:34:02.347046 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rntrt" event={"ID":"2f9159f2-6e04-48b0-b645-cc0b516143f5","Type":"ContainerStarted","Data":"81900e5d0440fbd9f60a2492c35c5ceb77596a2140a886f34d7fca954b523865"}
Jan 28 19:34:03 crc kubenswrapper[4767]: I0128 19:34:03.360276 4767 generic.go:334] "Generic (PLEG): container finished" podID="ae199241-8747-4750-ae65-b1794a14d8f2" containerID="955903ceb671f87018620ac77fd3cb39a8ceaa17fec0ee6db461e8fac14d25b4" exitCode=0
Jan 28 19:34:03 crc kubenswrapper[4767]: I0128 19:34:03.360376 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fbd6w" event={"ID":"ae199241-8747-4750-ae65-b1794a14d8f2","Type":"ContainerDied","Data":"955903ceb671f87018620ac77fd3cb39a8ceaa17fec0ee6db461e8fac14d25b4"}
Jan 28 19:34:03 crc kubenswrapper[4767]: I0128 19:34:03.365936 4767 generic.go:334] "Generic (PLEG): container finished" podID="2f9159f2-6e04-48b0-b645-cc0b516143f5" containerID="5a621f26b95bf1eaed59e334a985ceebfc5cea4429f3046e826638c3a50b2a8c" exitCode=0
Jan 28 19:34:03 crc kubenswrapper[4767]: I0128 19:34:03.365985 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rntrt" event={"ID":"2f9159f2-6e04-48b0-b645-cc0b516143f5","Type":"ContainerDied","Data":"5a621f26b95bf1eaed59e334a985ceebfc5cea4429f3046e826638c3a50b2a8c"}
Jan 28 19:34:03 crc kubenswrapper[4767]: I0128 19:34:03.368827 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 28 19:34:04 crc kubenswrapper[4767]: I0128 19:34:04.379942 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fbd6w" event={"ID":"ae199241-8747-4750-ae65-b1794a14d8f2","Type":"ContainerStarted","Data":"afb19b3f752cf0a8d3c26c0e4a666bb6d5e8a95e20c9a7609b0b74b5722c9a82"}
Jan 28 19:34:05 crc kubenswrapper[4767]: I0128 19:34:05.396764 4767 generic.go:334] "Generic (PLEG): container finished" podID="ae199241-8747-4750-ae65-b1794a14d8f2" containerID="afb19b3f752cf0a8d3c26c0e4a666bb6d5e8a95e20c9a7609b0b74b5722c9a82" exitCode=0
containerID="afb19b3f752cf0a8d3c26c0e4a666bb6d5e8a95e20c9a7609b0b74b5722c9a82" exitCode=0 Jan 28 19:34:05 crc kubenswrapper[4767]: I0128 19:34:05.396876 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fbd6w" event={"ID":"ae199241-8747-4750-ae65-b1794a14d8f2","Type":"ContainerDied","Data":"afb19b3f752cf0a8d3c26c0e4a666bb6d5e8a95e20c9a7609b0b74b5722c9a82"} Jan 28 19:34:05 crc kubenswrapper[4767]: I0128 19:34:05.399812 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rntrt" event={"ID":"2f9159f2-6e04-48b0-b645-cc0b516143f5","Type":"ContainerStarted","Data":"8fac6d98a16f70842e2c2d3deb49161a89cb614191744496ef4258662f75eee0"} Jan 28 19:34:06 crc kubenswrapper[4767]: I0128 19:34:06.414852 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fbd6w" event={"ID":"ae199241-8747-4750-ae65-b1794a14d8f2","Type":"ContainerStarted","Data":"5d8db8739f105033858e4b8b192f22a69104fda4ff88d2d2ef102bf79a536fef"} Jan 28 19:34:06 crc kubenswrapper[4767]: I0128 19:34:06.443045 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fbd6w" podStartSLOduration=2.937691149 podStartE2EDuration="6.443025115s" podCreationTimestamp="2026-01-28 19:34:00 +0000 UTC" firstStartedPulling="2026-01-28 19:34:02.344768174 +0000 UTC m=+3848.308951048" lastFinishedPulling="2026-01-28 19:34:05.85010214 +0000 UTC m=+3851.814285014" observedRunningTime="2026-01-28 19:34:06.438967647 +0000 UTC m=+3852.403150521" watchObservedRunningTime="2026-01-28 19:34:06.443025115 +0000 UTC m=+3852.407207989" Jan 28 19:34:10 crc kubenswrapper[4767]: I0128 19:34:10.458350 4767 generic.go:334] "Generic (PLEG): container finished" podID="2f9159f2-6e04-48b0-b645-cc0b516143f5" containerID="8fac6d98a16f70842e2c2d3deb49161a89cb614191744496ef4258662f75eee0" exitCode=0 Jan 28 19:34:10 crc kubenswrapper[4767]: I0128 19:34:10.458959 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rntrt" event={"ID":"2f9159f2-6e04-48b0-b645-cc0b516143f5","Type":"ContainerDied","Data":"8fac6d98a16f70842e2c2d3deb49161a89cb614191744496ef4258662f75eee0"} Jan 28 19:34:10 crc kubenswrapper[4767]: I0128 19:34:10.795566 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea" Jan 28 19:34:10 crc kubenswrapper[4767]: E0128 19:34:10.795896 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:34:11 crc kubenswrapper[4767]: I0128 19:34:11.232245 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fbd6w" Jan 28 19:34:11 crc kubenswrapper[4767]: I0128 19:34:11.233048 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fbd6w" Jan 28 19:34:11 crc kubenswrapper[4767]: I0128 19:34:11.370742 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fbd6w" Jan 28 19:34:11 crc kubenswrapper[4767]: I0128 
Jan 28 19:34:11 crc kubenswrapper[4767]: I0128 19:34:11.501024 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rntrt" podStartSLOduration=2.896162609 podStartE2EDuration="10.500997425s" podCreationTimestamp="2026-01-28 19:34:01 +0000 UTC" firstStartedPulling="2026-01-28 19:34:03.368496895 +0000 UTC m=+3849.332679759" lastFinishedPulling="2026-01-28 19:34:10.973331701 +0000 UTC m=+3856.937514575" observedRunningTime="2026-01-28 19:34:11.491801955 +0000 UTC m=+3857.455984849" watchObservedRunningTime="2026-01-28 19:34:11.500997425 +0000 UTC m=+3857.465180289"
Jan 28 19:34:11 crc kubenswrapper[4767]: I0128 19:34:11.533070 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fbd6w"
Jan 28 19:34:12 crc kubenswrapper[4767]: I0128 19:34:12.698268 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fbd6w"]
Jan 28 19:34:14 crc kubenswrapper[4767]: I0128 19:34:14.502692 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fbd6w" podUID="ae199241-8747-4750-ae65-b1794a14d8f2" containerName="registry-server" containerID="cri-o://5d8db8739f105033858e4b8b192f22a69104fda4ff88d2d2ef102bf79a536fef" gracePeriod=2
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.516192 4767 generic.go:334] "Generic (PLEG): container finished" podID="ae199241-8747-4750-ae65-b1794a14d8f2" containerID="5d8db8739f105033858e4b8b192f22a69104fda4ff88d2d2ef102bf79a536fef" exitCode=0
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.516245 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fbd6w" event={"ID":"ae199241-8747-4750-ae65-b1794a14d8f2","Type":"ContainerDied","Data":"5d8db8739f105033858e4b8b192f22a69104fda4ff88d2d2ef102bf79a536fef"}
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.516608 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fbd6w" event={"ID":"ae199241-8747-4750-ae65-b1794a14d8f2","Type":"ContainerDied","Data":"c2b3dc8102c8cb4bb8fcfdcb275153920a627ae9eef66910cdda59dc144776e2"}
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.516633 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c2b3dc8102c8cb4bb8fcfdcb275153920a627ae9eef66910cdda59dc144776e2"
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.601547 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fbd6w"
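Note: the two "Observed pod startup duration" records above are internally consistent: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling, taken from the monotonic m=+ offsets). A quick check with the redhat-marketplace-fbd6w numbers:

    # Monotonic m=+ offsets (seconds) copied from the fbd6w record above.
    first_started_pulling = 3848.308951048
    last_finished_pulling = 3851.814285014
    pod_start_e2e = 6.443025115  # watchObservedRunningTime - podCreationTimestamp

    pull_window = last_finished_pulling - first_started_pulling  # 3.505333966
    print(round(pod_start_e2e - pull_window, 9))  # 2.937691149 = podStartSLOduration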
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.786725 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae199241-8747-4750-ae65-b1794a14d8f2-catalog-content\") pod \"ae199241-8747-4750-ae65-b1794a14d8f2\" (UID: \"ae199241-8747-4750-ae65-b1794a14d8f2\") "
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.791799 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae199241-8747-4750-ae65-b1794a14d8f2-utilities\") pod \"ae199241-8747-4750-ae65-b1794a14d8f2\" (UID: \"ae199241-8747-4750-ae65-b1794a14d8f2\") "
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.791903 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5qhzt\" (UniqueName: \"kubernetes.io/projected/ae199241-8747-4750-ae65-b1794a14d8f2-kube-api-access-5qhzt\") pod \"ae199241-8747-4750-ae65-b1794a14d8f2\" (UID: \"ae199241-8747-4750-ae65-b1794a14d8f2\") "
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.792236 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae199241-8747-4750-ae65-b1794a14d8f2-utilities" (OuterVolumeSpecName: "utilities") pod "ae199241-8747-4750-ae65-b1794a14d8f2" (UID: "ae199241-8747-4750-ae65-b1794a14d8f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.792850 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ae199241-8747-4750-ae65-b1794a14d8f2-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.804769 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae199241-8747-4750-ae65-b1794a14d8f2-kube-api-access-5qhzt" (OuterVolumeSpecName: "kube-api-access-5qhzt") pod "ae199241-8747-4750-ae65-b1794a14d8f2" (UID: "ae199241-8747-4750-ae65-b1794a14d8f2"). InnerVolumeSpecName "kube-api-access-5qhzt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.828566 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ae199241-8747-4750-ae65-b1794a14d8f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ae199241-8747-4750-ae65-b1794a14d8f2" (UID: "ae199241-8747-4750-ae65-b1794a14d8f2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.893777 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5qhzt\" (UniqueName: \"kubernetes.io/projected/ae199241-8747-4750-ae65-b1794a14d8f2-kube-api-access-5qhzt\") on node \"crc\" DevicePath \"\""
Jan 28 19:34:15 crc kubenswrapper[4767]: I0128 19:34:15.893810 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ae199241-8747-4750-ae65-b1794a14d8f2-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 19:34:16 crc kubenswrapper[4767]: I0128 19:34:16.526139 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fbd6w"
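Note: each volume above walks the same three-step teardown: "UnmountVolume started" (reconciler), "UnmountVolume.TearDown succeeded" (operation generator), then "Volume detached" once the actual state of the world is updated, and only then can the pod directory be reclaimed. A toy model of that bookkeeping, for orientation only; the real kubelet drives this from its desired/actual state-of-world caches:

    # Per-volume states for the fbd6w pod, mirroring the records above.
    volumes = {"catalog-content": "mounted",
               "utilities": "mounted",
               "kube-api-access-5qhzt": "mounted"}

    def unmount(name):
        print(f'UnmountVolume started for volume "{name}"')
        volumes[name] = "torn-down"   # UnmountVolume.TearDown succeeded
        volumes[name] = "detached"    # Volume detached, DevicePath ""
        print(f'Volume detached for volume "{name}"')

    for name in list(volumes):
        unmount(name)
    assert all(state == "detached" for state in volumes.values())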
Jan 28 19:34:16 crc kubenswrapper[4767]: I0128 19:34:16.581012 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fbd6w"]
Jan 28 19:34:16 crc kubenswrapper[4767]: I0128 19:34:16.596834 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fbd6w"]
Jan 28 19:34:16 crc kubenswrapper[4767]: I0128 19:34:16.808891 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae199241-8747-4750-ae65-b1794a14d8f2" path="/var/lib/kubelet/pods/ae199241-8747-4750-ae65-b1794a14d8f2/volumes"
Jan 28 19:34:21 crc kubenswrapper[4767]: I0128 19:34:21.387307 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:21 crc kubenswrapper[4767]: I0128 19:34:21.388008 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:21 crc kubenswrapper[4767]: I0128 19:34:21.439752 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:21 crc kubenswrapper[4767]: I0128 19:34:21.625321 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:21 crc kubenswrapper[4767]: I0128 19:34:21.704061 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rntrt"]
Jan 28 19:34:23 crc kubenswrapper[4767]: I0128 19:34:23.593950 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rntrt" podUID="2f9159f2-6e04-48b0-b645-cc0b516143f5" containerName="registry-server" containerID="cri-o://f5da24ad6f49bd62a7b3f80c714a4bf7ef2a4e6888fe6d10214485856016f5f6" gracePeriod=2
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.085717 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rntrt"
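Note: gracePeriod=2 in the "Killing container with a grace period" records means the runtime delivers SIGTERM and escalates to SIGKILL if the registry-server has not exited within two seconds. A generic two-phase stop in Python, purely as illustration of the pattern:

    import signal
    import subprocess
    import time

    def stop_with_grace(proc: subprocess.Popen, grace_seconds: float) -> None:
        """Two-phase stop: SIGTERM first, SIGKILL once the grace period lapses."""
        proc.send_signal(signal.SIGTERM)
        deadline = time.monotonic() + grace_seconds
        while proc.poll() is None and time.monotonic() < deadline:
            time.sleep(0.05)
        if proc.poll() is None:
            proc.kill()  # escalate, as the runtime does after gracePeriod

    p = subprocess.Popen(["sleep", "60"])
    stop_with_grace(p, grace_seconds=2)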
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.227667 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wlqgb\" (UniqueName: \"kubernetes.io/projected/2f9159f2-6e04-48b0-b645-cc0b516143f5-kube-api-access-wlqgb\") pod \"2f9159f2-6e04-48b0-b645-cc0b516143f5\" (UID: \"2f9159f2-6e04-48b0-b645-cc0b516143f5\") "
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.227742 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9159f2-6e04-48b0-b645-cc0b516143f5-catalog-content\") pod \"2f9159f2-6e04-48b0-b645-cc0b516143f5\" (UID: \"2f9159f2-6e04-48b0-b645-cc0b516143f5\") "
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.227874 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9159f2-6e04-48b0-b645-cc0b516143f5-utilities\") pod \"2f9159f2-6e04-48b0-b645-cc0b516143f5\" (UID: \"2f9159f2-6e04-48b0-b645-cc0b516143f5\") "
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.228846 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f9159f2-6e04-48b0-b645-cc0b516143f5-utilities" (OuterVolumeSpecName: "utilities") pod "2f9159f2-6e04-48b0-b645-cc0b516143f5" (UID: "2f9159f2-6e04-48b0-b645-cc0b516143f5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.234670 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f9159f2-6e04-48b0-b645-cc0b516143f5-kube-api-access-wlqgb" (OuterVolumeSpecName: "kube-api-access-wlqgb") pod "2f9159f2-6e04-48b0-b645-cc0b516143f5" (UID: "2f9159f2-6e04-48b0-b645-cc0b516143f5"). InnerVolumeSpecName "kube-api-access-wlqgb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.336894 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2f9159f2-6e04-48b0-b645-cc0b516143f5-utilities\") on node \"crc\" DevicePath \"\""
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.337343 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wlqgb\" (UniqueName: \"kubernetes.io/projected/2f9159f2-6e04-48b0-b645-cc0b516143f5-kube-api-access-wlqgb\") on node \"crc\" DevicePath \"\""
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.376991 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f9159f2-6e04-48b0-b645-cc0b516143f5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2f9159f2-6e04-48b0-b645-cc0b516143f5" (UID: "2f9159f2-6e04-48b0-b645-cc0b516143f5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.440386 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2f9159f2-6e04-48b0-b645-cc0b516143f5-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.607112 4767 generic.go:334] "Generic (PLEG): container finished" podID="2f9159f2-6e04-48b0-b645-cc0b516143f5" containerID="f5da24ad6f49bd62a7b3f80c714a4bf7ef2a4e6888fe6d10214485856016f5f6" exitCode=0
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.607199 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rntrt" event={"ID":"2f9159f2-6e04-48b0-b645-cc0b516143f5","Type":"ContainerDied","Data":"f5da24ad6f49bd62a7b3f80c714a4bf7ef2a4e6888fe6d10214485856016f5f6"}
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.607235 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rntrt"
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.607276 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rntrt" event={"ID":"2f9159f2-6e04-48b0-b645-cc0b516143f5","Type":"ContainerDied","Data":"81900e5d0440fbd9f60a2492c35c5ceb77596a2140a886f34d7fca954b523865"}
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.607309 4767 scope.go:117] "RemoveContainer" containerID="f5da24ad6f49bd62a7b3f80c714a4bf7ef2a4e6888fe6d10214485856016f5f6"
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.646132 4767 scope.go:117] "RemoveContainer" containerID="8fac6d98a16f70842e2c2d3deb49161a89cb614191744496ef4258662f75eee0"
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.648026 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rntrt"]
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.685085 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rntrt"]
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.691508 4767 scope.go:117] "RemoveContainer" containerID="5a621f26b95bf1eaed59e334a985ceebfc5cea4429f3046e826638c3a50b2a8c"
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.728454 4767 scope.go:117] "RemoveContainer" containerID="f5da24ad6f49bd62a7b3f80c714a4bf7ef2a4e6888fe6d10214485856016f5f6"
Jan 28 19:34:24 crc kubenswrapper[4767]: E0128 19:34:24.729598 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5da24ad6f49bd62a7b3f80c714a4bf7ef2a4e6888fe6d10214485856016f5f6\": container with ID starting with f5da24ad6f49bd62a7b3f80c714a4bf7ef2a4e6888fe6d10214485856016f5f6 not found: ID does not exist" containerID="f5da24ad6f49bd62a7b3f80c714a4bf7ef2a4e6888fe6d10214485856016f5f6"
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.729666 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5da24ad6f49bd62a7b3f80c714a4bf7ef2a4e6888fe6d10214485856016f5f6"} err="failed to get container status \"f5da24ad6f49bd62a7b3f80c714a4bf7ef2a4e6888fe6d10214485856016f5f6\": rpc error: code = NotFound desc = could not find container \"f5da24ad6f49bd62a7b3f80c714a4bf7ef2a4e6888fe6d10214485856016f5f6\": container with ID starting with f5da24ad6f49bd62a7b3f80c714a4bf7ef2a4e6888fe6d10214485856016f5f6 not found: ID does not exist"
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.729704 4767 scope.go:117] "RemoveContainer" containerID="8fac6d98a16f70842e2c2d3deb49161a89cb614191744496ef4258662f75eee0"
Jan 28 19:34:24 crc kubenswrapper[4767]: E0128 19:34:24.730490 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8fac6d98a16f70842e2c2d3deb49161a89cb614191744496ef4258662f75eee0\": container with ID starting with 8fac6d98a16f70842e2c2d3deb49161a89cb614191744496ef4258662f75eee0 not found: ID does not exist" containerID="8fac6d98a16f70842e2c2d3deb49161a89cb614191744496ef4258662f75eee0"
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.730555 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8fac6d98a16f70842e2c2d3deb49161a89cb614191744496ef4258662f75eee0"} err="failed to get container status \"8fac6d98a16f70842e2c2d3deb49161a89cb614191744496ef4258662f75eee0\": rpc error: code = NotFound desc = could not find container \"8fac6d98a16f70842e2c2d3deb49161a89cb614191744496ef4258662f75eee0\": container with ID starting with 8fac6d98a16f70842e2c2d3deb49161a89cb614191744496ef4258662f75eee0 not found: ID does not exist"
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.730602 4767 scope.go:117] "RemoveContainer" containerID="5a621f26b95bf1eaed59e334a985ceebfc5cea4429f3046e826638c3a50b2a8c"
Jan 28 19:34:24 crc kubenswrapper[4767]: E0128 19:34:24.731043 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a621f26b95bf1eaed59e334a985ceebfc5cea4429f3046e826638c3a50b2a8c\": container with ID starting with 5a621f26b95bf1eaed59e334a985ceebfc5cea4429f3046e826638c3a50b2a8c not found: ID does not exist" containerID="5a621f26b95bf1eaed59e334a985ceebfc5cea4429f3046e826638c3a50b2a8c"
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.731098 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a621f26b95bf1eaed59e334a985ceebfc5cea4429f3046e826638c3a50b2a8c"} err="failed to get container status \"5a621f26b95bf1eaed59e334a985ceebfc5cea4429f3046e826638c3a50b2a8c\": rpc error: code = NotFound desc = could not find container \"5a621f26b95bf1eaed59e334a985ceebfc5cea4429f3046e826638c3a50b2a8c\": container with ID starting with 5a621f26b95bf1eaed59e334a985ceebfc5cea4429f3046e826638c3a50b2a8c not found: ID does not exist"
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.805076 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:34:24 crc kubenswrapper[4767]: E0128 19:34:24.805840 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:34:24 crc kubenswrapper[4767]: I0128 19:34:24.814622 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f9159f2-6e04-48b0-b645-cc0b516143f5" path="/var/lib/kubelet/pods/2f9159f2-6e04-48b0-b645-cc0b516143f5/volumes"
Jan 28 19:34:38 crc kubenswrapper[4767]: I0128 19:34:38.796544 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:34:38 crc kubenswrapper[4767]: E0128 19:34:38.797283 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
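Note: the RemoveContainer / "ContainerStatus from runtime service failed" / "DeleteContainer returned error" triplets above are benign. The containers were already gone when the cleanup retried, and a NotFound on delete is treated as work already done. A sketch of that idempotent pattern (all names here are illustrative, not kubelet's):

    class NotFound(Exception):
        pass

    def container_status(cid, store):
        if cid not in store:
            raise NotFound(f'could not find container "{cid}"')
        return store[cid]

    def remove_container(cid, store):
        # Idempotent cleanup: a missing container means nothing is left to do.
        try:
            container_status(cid, store)
        except NotFound as err:
            print(f"DeleteContainer returned error ... {err}")
            return
        del store[cid]

    remove_container("f5da24ad", {})  # already deleted elsewhere: logged, not fatal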
Jan 28 19:34:50 crc kubenswrapper[4767]: I0128 19:34:50.796455 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:34:50 crc kubenswrapper[4767]: E0128 19:34:50.797175 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:35:01 crc kubenswrapper[4767]: I0128 19:35:01.796481 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:35:01 crc kubenswrapper[4767]: E0128 19:35:01.797393 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:35:15 crc kubenswrapper[4767]: I0128 19:35:15.795690 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:35:15 crc kubenswrapper[4767]: E0128 19:35:15.796514 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:35:22 crc kubenswrapper[4767]: I0128 19:35:22.828550 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-79f6f75b9c-dhf5c_0d7c50d3-1348-43e5-a8fa-f05cd53d2a42/manager/0.log"
Jan 28 19:35:24 crc kubenswrapper[4767]: I0128 19:35:24.935264 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ft4fg"]
Jan 28 19:35:24 crc kubenswrapper[4767]: E0128 19:35:24.936169 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f9159f2-6e04-48b0-b645-cc0b516143f5" containerName="extract-utilities"
Jan 28 19:35:24 crc kubenswrapper[4767]: I0128 19:35:24.936193 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f9159f2-6e04-48b0-b645-cc0b516143f5" containerName="extract-utilities"
Jan 28 19:35:24 crc kubenswrapper[4767]: E0128 19:35:24.936348 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae199241-8747-4750-ae65-b1794a14d8f2" containerName="extract-content"
Jan 28 19:35:24 crc kubenswrapper[4767]: I0128 19:35:24.936360 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae199241-8747-4750-ae65-b1794a14d8f2" containerName="extract-content"
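Note: "back-off 5m0s" above is the ceiling of the kubelet's per-container crash-loop delay, which by default starts at 10s and doubles on each failed restart until capped at 5m0s; that is why machine-config-daemon is only retried every ten to fifteen seconds at first and then every few minutes. A sketch of that schedule (the 10s base, 2x factor, and 5m cap are the kubelet defaults as I understand them):

    def restart_backoffs(initial=10.0, factor=2.0, cap=300.0):
        """Yield kubelet-style crash-loop delays: 10s, 20s, ... capped at 5m0s."""
        delay = initial
        while True:
            yield min(delay, cap)
            delay *= factor

    gen = restart_backoffs()
    print([next(gen) for _ in range(7)])  # [10.0, 20.0, 40.0, 80.0, 160.0, 300.0, 300.0]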
Jan 28 19:35:24 crc kubenswrapper[4767]: E0128 19:35:24.936375 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae199241-8747-4750-ae65-b1794a14d8f2" containerName="extract-utilities"
Jan 28 19:35:24 crc kubenswrapper[4767]: I0128 19:35:24.936384 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae199241-8747-4750-ae65-b1794a14d8f2" containerName="extract-utilities"
Jan 28 19:35:24 crc kubenswrapper[4767]: E0128 19:35:24.936395 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f9159f2-6e04-48b0-b645-cc0b516143f5" containerName="registry-server"
Jan 28 19:35:24 crc kubenswrapper[4767]: I0128 19:35:24.936403 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f9159f2-6e04-48b0-b645-cc0b516143f5" containerName="registry-server"
Jan 28 19:35:24 crc kubenswrapper[4767]: E0128 19:35:24.936423 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae199241-8747-4750-ae65-b1794a14d8f2" containerName="registry-server"
Jan 28 19:35:24 crc kubenswrapper[4767]: I0128 19:35:24.936431 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae199241-8747-4750-ae65-b1794a14d8f2" containerName="registry-server"
Jan 28 19:35:24 crc kubenswrapper[4767]: E0128 19:35:24.936444 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f9159f2-6e04-48b0-b645-cc0b516143f5" containerName="extract-content"
Jan 28 19:35:24 crc kubenswrapper[4767]: I0128 19:35:24.936453 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f9159f2-6e04-48b0-b645-cc0b516143f5" containerName="extract-content"
Jan 28 19:35:24 crc kubenswrapper[4767]: I0128 19:35:24.936740 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae199241-8747-4750-ae65-b1794a14d8f2" containerName="registry-server"
Jan 28 19:35:24 crc kubenswrapper[4767]: I0128 19:35:24.936761 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f9159f2-6e04-48b0-b645-cc0b516143f5" containerName="registry-server"
Jan 28 19:35:24 crc kubenswrapper[4767]: I0128 19:35:24.938720 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ft4fg"
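Note: the cpu_manager/state_mem/memory_manager burst above is the resource managers sweeping cached per-container assignments whose pod UIDs no longer exist (the two marketplace pods deleted earlier) before admitting certified-operators-ft4fg. A toy version of such a sweep, in the spirit of RemoveStaleState:

    # Drop per-container resource assignments whose pod UID is no longer active.
    assignments = {
        ("2f9159f2-6e04-48b0-b645-cc0b516143f5", "registry-server"): "cpuset:2-3",
        ("c90559fe-83c6-4d44-ad19-205e7fbe5417", "registry-server"): "cpuset:0-1",
    }
    active_pods = {"c90559fe-83c6-4d44-ad19-205e7fbe5417"}

    for (pod_uid, container), _ in list(assignments.items()):
        if pod_uid not in active_pods:
            print(f'RemoveStaleState: removing container podUID="{pod_uid}" '
                  f'containerName="{container}"')
            del assignments[(pod_uid, container)]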
Jan 28 19:35:24 crc kubenswrapper[4767]: I0128 19:35:24.964757 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ft4fg"]
Jan 28 19:35:25 crc kubenswrapper[4767]: I0128 19:35:25.000592 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c90559fe-83c6-4d44-ad19-205e7fbe5417-utilities\") pod \"certified-operators-ft4fg\" (UID: \"c90559fe-83c6-4d44-ad19-205e7fbe5417\") " pod="openshift-marketplace/certified-operators-ft4fg"
Jan 28 19:35:25 crc kubenswrapper[4767]: I0128 19:35:25.000669 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4zktj\" (UniqueName: \"kubernetes.io/projected/c90559fe-83c6-4d44-ad19-205e7fbe5417-kube-api-access-4zktj\") pod \"certified-operators-ft4fg\" (UID: \"c90559fe-83c6-4d44-ad19-205e7fbe5417\") " pod="openshift-marketplace/certified-operators-ft4fg"
Jan 28 19:35:25 crc kubenswrapper[4767]: I0128 19:35:25.000771 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c90559fe-83c6-4d44-ad19-205e7fbe5417-catalog-content\") pod \"certified-operators-ft4fg\" (UID: \"c90559fe-83c6-4d44-ad19-205e7fbe5417\") " pod="openshift-marketplace/certified-operators-ft4fg"
Jan 28 19:35:25 crc kubenswrapper[4767]: I0128 19:35:25.102595 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c90559fe-83c6-4d44-ad19-205e7fbe5417-utilities\") pod \"certified-operators-ft4fg\" (UID: \"c90559fe-83c6-4d44-ad19-205e7fbe5417\") " pod="openshift-marketplace/certified-operators-ft4fg"
Jan 28 19:35:25 crc kubenswrapper[4767]: I0128 19:35:25.102667 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4zktj\" (UniqueName: \"kubernetes.io/projected/c90559fe-83c6-4d44-ad19-205e7fbe5417-kube-api-access-4zktj\") pod \"certified-operators-ft4fg\" (UID: \"c90559fe-83c6-4d44-ad19-205e7fbe5417\") " pod="openshift-marketplace/certified-operators-ft4fg"
Jan 28 19:35:25 crc kubenswrapper[4767]: I0128 19:35:25.102721 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c90559fe-83c6-4d44-ad19-205e7fbe5417-catalog-content\") pod \"certified-operators-ft4fg\" (UID: \"c90559fe-83c6-4d44-ad19-205e7fbe5417\") " pod="openshift-marketplace/certified-operators-ft4fg"
Jan 28 19:35:25 crc kubenswrapper[4767]: I0128 19:35:25.103414 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c90559fe-83c6-4d44-ad19-205e7fbe5417-catalog-content\") pod \"certified-operators-ft4fg\" (UID: \"c90559fe-83c6-4d44-ad19-205e7fbe5417\") " pod="openshift-marketplace/certified-operators-ft4fg"
Jan 28 19:35:25 crc kubenswrapper[4767]: I0128 19:35:25.103556 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c90559fe-83c6-4d44-ad19-205e7fbe5417-utilities\") pod \"certified-operators-ft4fg\" (UID: \"c90559fe-83c6-4d44-ad19-205e7fbe5417\") " pod="openshift-marketplace/certified-operators-ft4fg"
Jan 28 19:35:25 crc kubenswrapper[4767]: I0128 19:35:25.126449 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4zktj\" (UniqueName: \"kubernetes.io/projected/c90559fe-83c6-4d44-ad19-205e7fbe5417-kube-api-access-4zktj\") pod \"certified-operators-ft4fg\" (UID: \"c90559fe-83c6-4d44-ad19-205e7fbe5417\") " pod="openshift-marketplace/certified-operators-ft4fg"
Jan 28 19:35:25 crc kubenswrapper[4767]: I0128 19:35:25.274200 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ft4fg"
Jan 28 19:35:25 crc kubenswrapper[4767]: I0128 19:35:25.859040 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ft4fg"]
Jan 28 19:35:26 crc kubenswrapper[4767]: I0128 19:35:26.264037 4767 generic.go:334] "Generic (PLEG): container finished" podID="c90559fe-83c6-4d44-ad19-205e7fbe5417" containerID="29bbe9af0207507ff9a544a912cf99af1af0c525c78eb3a85a3cae494fb3e237" exitCode=0
Jan 28 19:35:26 crc kubenswrapper[4767]: I0128 19:35:26.264449 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft4fg" event={"ID":"c90559fe-83c6-4d44-ad19-205e7fbe5417","Type":"ContainerDied","Data":"29bbe9af0207507ff9a544a912cf99af1af0c525c78eb3a85a3cae494fb3e237"}
Jan 28 19:35:26 crc kubenswrapper[4767]: I0128 19:35:26.264486 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft4fg" event={"ID":"c90559fe-83c6-4d44-ad19-205e7fbe5417","Type":"ContainerStarted","Data":"55c141efcea31b245c6b790d00729b4c8f6b714b95a9fab9bb623e169e0f25ba"}
Jan 28 19:35:26 crc kubenswrapper[4767]: I0128 19:35:26.624906 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 19:35:26 crc kubenswrapper[4767]: I0128 19:35:26.625255 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="prometheus" containerID="cri-o://faf0e488f695c49ee141e4010c238f87b5fd43ba5bc5cf3d50b2a607dd5fcff6" gracePeriod=600
Jan 28 19:35:26 crc kubenswrapper[4767]: I0128 19:35:26.625778 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="config-reloader" containerID="cri-o://95cb471d70c65354b2665e57088ed57f649c6d43c6d08ea406f3f2889c87fcde" gracePeriod=600
Jan 28 19:35:26 crc kubenswrapper[4767]: I0128 19:35:26.625725 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/prometheus-metric-storage-0" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="thanos-sidecar" containerID="cri-o://a1d9dd05128e0fec96a7715c4456d8f3a2b5ef45d6a567eda3ac2219f4449aba" gracePeriod=600
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.281914 4767 generic.go:334] "Generic (PLEG): container finished" podID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerID="a1d9dd05128e0fec96a7715c4456d8f3a2b5ef45d6a567eda3ac2219f4449aba" exitCode=0
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.282275 4767 generic.go:334] "Generic (PLEG): container finished" podID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerID="95cb471d70c65354b2665e57088ed57f649c6d43c6d08ea406f3f2889c87fcde" exitCode=0
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.282291 4767 generic.go:334] "Generic (PLEG): container finished" podID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerID="faf0e488f695c49ee141e4010c238f87b5fd43ba5bc5cf3d50b2a607dd5fcff6" exitCode=0
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.282322 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc","Type":"ContainerDied","Data":"a1d9dd05128e0fec96a7715c4456d8f3a2b5ef45d6a567eda3ac2219f4449aba"}
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.282356 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc","Type":"ContainerDied","Data":"95cb471d70c65354b2665e57088ed57f649c6d43c6d08ea406f3f2889c87fcde"}
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.282371 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc","Type":"ContainerDied","Data":"faf0e488f695c49ee141e4010c238f87b5fd43ba5bc5cf3d50b2a607dd5fcff6"}
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.612841 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.703960 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.704107 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.704183 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-secret-combined-ca-bundle\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.704226 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-tls-assets\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.704251 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-0\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.704318 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-2\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.704354 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-thanos-prometheus-http-client-file\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.704401 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-config-out\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.704425 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-1\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.704585 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-db\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.704614 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.704640 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fc7p\" (UniqueName: \"kubernetes.io/projected/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-kube-api-access-7fc7p\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.704669 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-config\") pod \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\" (UID: \"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc\") "
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.707792 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-db" (OuterVolumeSpecName: "prometheus-metric-storage-db") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "prometheus-metric-storage-db". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.708013 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-0" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-0") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.708749 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-1" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-1") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-1". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.708813 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-2" (OuterVolumeSpecName: "prometheus-metric-storage-rulefiles-2") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "prometheus-metric-storage-rulefiles-2". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.711708 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-config-out" (OuterVolumeSpecName: "config-out") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "config-out". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.713388 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-secret-combined-ca-bundle" (OuterVolumeSpecName: "secret-combined-ca-bundle") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "secret-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.713422 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-config" (OuterVolumeSpecName: "config") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.713426 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-tls-assets" (OuterVolumeSpecName: "tls-assets") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "tls-assets". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.714572 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-kube-api-access-7fc7p" (OuterVolumeSpecName: "kube-api-access-7fc7p") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "kube-api-access-7fc7p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.714574 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.715529 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-thanos-prometheus-http-client-file" (OuterVolumeSpecName: "thanos-prometheus-http-client-file") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "thanos-prometheus-http-client-file". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.723468 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d" (OuterVolumeSpecName: "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.797875 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea"
Jan 28 19:35:27 crc kubenswrapper[4767]: E0128 19:35:27.798190 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c"
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.808823 4767 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-db\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.808873 4767 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.808893 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fc7p\" (UniqueName: \"kubernetes.io/projected/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-kube-api-access-7fc7p\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.808906 4767 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-config\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.808919 4767 reconciler_common.go:293] "Volume detached for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.808936 4767 reconciler_common.go:293] "Volume detached for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-secret-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.808955 4767 reconciler_common.go:293] "Volume detached for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-tls-assets\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.808968 4767 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-0\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.808989 4767 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-2\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.809008 4767 reconciler_common.go:293] "Volume detached for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-thanos-prometheus-http-client-file\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.809021 4767 reconciler_common.go:293] "Volume detached for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-config-out\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.809035 4767 reconciler_common.go:293] "Volume detached for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-prometheus-metric-storage-rulefiles-1\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.832266 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config" (OuterVolumeSpecName: "web-config") pod "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" (UID: "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"). InnerVolumeSpecName "web-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 28 19:35:27 crc kubenswrapper[4767]: I0128 19:35:27.912693 4767 reconciler_common.go:293] "Volume detached for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc-web-config\") on node \"crc\" DevicePath \"\""
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.312556 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"b10ad1ff-cf88-4948-93d5-60f7eca9f9bc","Type":"ContainerDied","Data":"36abd21613c419d8aed86163b640590a8c751805926b0cb57625bede58d26371"}
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.312643 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0"
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.314107 4767 scope.go:117] "RemoveContainer" containerID="a1d9dd05128e0fec96a7715c4456d8f3a2b5ef45d6a567eda3ac2219f4449aba"
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.315356 4767 generic.go:334] "Generic (PLEG): container finished" podID="c90559fe-83c6-4d44-ad19-205e7fbe5417" containerID="27560c1c684ae13d694873a75be75b0cbc91c57b9e23a7aa173a779068c8710c" exitCode=0
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.315435 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft4fg" event={"ID":"c90559fe-83c6-4d44-ad19-205e7fbe5417","Type":"ContainerDied","Data":"27560c1c684ae13d694873a75be75b0cbc91c57b9e23a7aa173a779068c8710c"}
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.343932 4767 scope.go:117] "RemoveContainer" containerID="95cb471d70c65354b2665e57088ed57f649c6d43c6d08ea406f3f2889c87fcde"
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.368385 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.377620 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.379554 4767 scope.go:117] "RemoveContainer" containerID="faf0e488f695c49ee141e4010c238f87b5fd43ba5bc5cf3d50b2a607dd5fcff6"
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.403426 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/prometheus-metric-storage-0"]
Jan 28 19:35:28 crc kubenswrapper[4767]: E0128 19:35:28.403900 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="config-reloader"
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.403916 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="config-reloader"
Jan 28 19:35:28 crc kubenswrapper[4767]: E0128 19:35:28.403933 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="prometheus"
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.403940 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="prometheus"
Jan 28 19:35:28 crc kubenswrapper[4767]: E0128 19:35:28.404970 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="init-config-reloader"
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.404993 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="init-config-reloader"
Jan 28 19:35:28 crc kubenswrapper[4767]: E0128 19:35:28.405037 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="thanos-sidecar"
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.405049 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="thanos-sidecar"
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.405339 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="thanos-sidecar"
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.405369 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="config-reloader"
Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.405383 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="prometheus"
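Note: prometheus-metric-storage-0 is deleted and immediately re-added under the same name but a new UID (b10ad1ff-cf88-4948-93d5-60f7eca9f9bc above, 57323335-588f-4faa-9dc4-dcaf2aa2b5f0 below). Every kubelet-side path is keyed by the UID, which is why all volumes are torn down and re-mounted from scratch; the name survives, the identity does not:

    old_uid = "b10ad1ff-cf88-4948-93d5-60f7eca9f9bc"
    new_uid = "57323335-588f-4faa-9dc4-dcaf2aa2b5f0"

    def pod_dir(uid):
        # Path pattern as seen in the "Cleaned up orphaned pod volumes dir" records.
        return f"/var/lib/kubelet/pods/{uid}/volumes"

    # Same pod name, different object: every per-pod path changes with the UID.
    assert pod_dir(old_uid) != pod_dir(new_uid)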
"RemoveStaleState removing state" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="config-reloader" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.405383 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" containerName="prometheus" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.407279 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.409734 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-web-config" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.410079 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-metric-storage-prometheus-svc" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.410159 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-thanos-prometheus-http-client-file" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.410273 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.410453 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-2" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.411086 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-1" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.411412 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"prometheus-metric-storage-rulefiles-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.415282 4767 scope.go:117] "RemoveContainer" containerID="ef97b64d562c158a76ad906affea07f1d0c42ae44d28a5c6435cc0cbcbceca0c" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.415463 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"metric-storage-prometheus-dockercfg-b9jgx" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.419493 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"prometheus-metric-storage-tls-assets-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.434305 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.527728 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.527782 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.527999 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: 
\"kubernetes.io/configmap/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.528156 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-prometheus-metric-storage-db\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.528196 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xm9zl\" (UniqueName: \"kubernetes.io/projected/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-kube-api-access-xm9zl\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.528289 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.528397 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.528472 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.528581 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-config\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.528698 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.528732 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: 
\"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.528838 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.528870 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631036 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631110 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631169 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631224 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-prometheus-metric-storage-db\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631245 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xm9zl\" (UniqueName: \"kubernetes.io/projected/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-kube-api-access-xm9zl\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631267 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: 
\"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631305 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631336 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631376 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-config\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631412 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631431 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631464 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631482 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.631864 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-db\" (UniqueName: \"kubernetes.io/empty-dir/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-prometheus-metric-storage-db\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.632293 4767 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"prometheus-metric-storage-rulefiles-1\" (UniqueName: \"kubernetes.io/configmap/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-prometheus-metric-storage-rulefiles-1\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.635164 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-2\" (UniqueName: \"kubernetes.io/configmap/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-prometheus-metric-storage-rulefiles-2\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.635562 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-out\" (UniqueName: \"kubernetes.io/empty-dir/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-config-out\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.632525 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"prometheus-metric-storage-rulefiles-0\" (UniqueName: \"kubernetes.io/configmap/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-prometheus-metric-storage-rulefiles-0\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.637954 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-assets\" (UniqueName: \"kubernetes.io/projected/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-tls-assets\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.638114 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-config\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.638422 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-secret-combined-ca-bundle\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.640669 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-web-config\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.641018 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"thanos-prometheus-http-client-file\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-thanos-prometheus-http-client-file\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.641092 4767 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-web-config-tls-secret-cert-cert-metric-storage-prometh-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.669707 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\" (UniqueName: \"kubernetes.io/secret/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-web-config-tls-secret-key-cert-metric-storage-promethe-dc638c2d\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.673192 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xm9zl\" (UniqueName: \"kubernetes.io/projected/57323335-588f-4faa-9dc4-dcaf2aa2b5f0-kube-api-access-xm9zl\") pod \"prometheus-metric-storage-0\" (UID: \"57323335-588f-4faa-9dc4-dcaf2aa2b5f0\") " pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.790302 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:28 crc kubenswrapper[4767]: I0128 19:35:28.811722 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b10ad1ff-cf88-4948-93d5-60f7eca9f9bc" path="/var/lib/kubelet/pods/b10ad1ff-cf88-4948-93d5-60f7eca9f9bc/volumes" Jan 28 19:35:29 crc kubenswrapper[4767]: I0128 19:35:29.330733 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft4fg" event={"ID":"c90559fe-83c6-4d44-ad19-205e7fbe5417","Type":"ContainerStarted","Data":"85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0"} Jan 28 19:35:29 crc kubenswrapper[4767]: I0128 19:35:29.373888 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ft4fg" podStartSLOduration=2.855805004 podStartE2EDuration="5.373852494s" podCreationTimestamp="2026-01-28 19:35:24 +0000 UTC" firstStartedPulling="2026-01-28 19:35:26.267471423 +0000 UTC m=+3932.231654297" lastFinishedPulling="2026-01-28 19:35:28.785518913 +0000 UTC m=+3934.749701787" observedRunningTime="2026-01-28 19:35:29.361199455 +0000 UTC m=+3935.325382329" watchObservedRunningTime="2026-01-28 19:35:29.373852494 +0000 UTC m=+3935.338035368" Jan 28 19:35:29 crc kubenswrapper[4767]: I0128 19:35:29.471115 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/prometheus-metric-storage-0"] Jan 28 19:35:30 crc kubenswrapper[4767]: I0128 19:35:30.341941 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"57323335-588f-4faa-9dc4-dcaf2aa2b5f0","Type":"ContainerStarted","Data":"d54c12914c28496f2922af1f59b94ec44ce8738713261705fcfe6853bcdd70f7"} Jan 28 19:35:34 crc kubenswrapper[4767]: I0128 19:35:34.389423 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"57323335-588f-4faa-9dc4-dcaf2aa2b5f0","Type":"ContainerStarted","Data":"8d9c43a54f53f94336f43e5fec92793e394d4de85817a5b99bfc52c3fe8764d0"} Jan 28 19:35:35 crc kubenswrapper[4767]: I0128 19:35:35.275403 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/certified-operators-ft4fg" Jan 28 19:35:35 crc kubenswrapper[4767]: I0128 19:35:35.275706 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ft4fg" Jan 28 19:35:35 crc kubenswrapper[4767]: I0128 19:35:35.326476 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ft4fg" Jan 28 19:35:35 crc kubenswrapper[4767]: I0128 19:35:35.448112 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ft4fg" Jan 28 19:35:35 crc kubenswrapper[4767]: I0128 19:35:35.567223 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ft4fg"] Jan 28 19:35:37 crc kubenswrapper[4767]: I0128 19:35:37.423952 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ft4fg" podUID="c90559fe-83c6-4d44-ad19-205e7fbe5417" containerName="registry-server" containerID="cri-o://85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0" gracePeriod=2 Jan 28 19:35:37 crc kubenswrapper[4767]: I0128 19:35:37.925433 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ft4fg" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.057528 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c90559fe-83c6-4d44-ad19-205e7fbe5417-catalog-content\") pod \"c90559fe-83c6-4d44-ad19-205e7fbe5417\" (UID: \"c90559fe-83c6-4d44-ad19-205e7fbe5417\") " Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.057802 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c90559fe-83c6-4d44-ad19-205e7fbe5417-utilities\") pod \"c90559fe-83c6-4d44-ad19-205e7fbe5417\" (UID: \"c90559fe-83c6-4d44-ad19-205e7fbe5417\") " Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.057962 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4zktj\" (UniqueName: \"kubernetes.io/projected/c90559fe-83c6-4d44-ad19-205e7fbe5417-kube-api-access-4zktj\") pod \"c90559fe-83c6-4d44-ad19-205e7fbe5417\" (UID: \"c90559fe-83c6-4d44-ad19-205e7fbe5417\") " Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.060173 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c90559fe-83c6-4d44-ad19-205e7fbe5417-utilities" (OuterVolumeSpecName: "utilities") pod "c90559fe-83c6-4d44-ad19-205e7fbe5417" (UID: "c90559fe-83c6-4d44-ad19-205e7fbe5417"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.065485 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c90559fe-83c6-4d44-ad19-205e7fbe5417-kube-api-access-4zktj" (OuterVolumeSpecName: "kube-api-access-4zktj") pod "c90559fe-83c6-4d44-ad19-205e7fbe5417" (UID: "c90559fe-83c6-4d44-ad19-205e7fbe5417"). InnerVolumeSpecName "kube-api-access-4zktj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.151000 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c90559fe-83c6-4d44-ad19-205e7fbe5417-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c90559fe-83c6-4d44-ad19-205e7fbe5417" (UID: "c90559fe-83c6-4d44-ad19-205e7fbe5417"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.161094 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c90559fe-83c6-4d44-ad19-205e7fbe5417-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.161135 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4zktj\" (UniqueName: \"kubernetes.io/projected/c90559fe-83c6-4d44-ad19-205e7fbe5417-kube-api-access-4zktj\") on node \"crc\" DevicePath \"\"" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.161148 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c90559fe-83c6-4d44-ad19-205e7fbe5417-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.440579 4767 generic.go:334] "Generic (PLEG): container finished" podID="c90559fe-83c6-4d44-ad19-205e7fbe5417" containerID="85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0" exitCode=0 Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.440669 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft4fg" event={"ID":"c90559fe-83c6-4d44-ad19-205e7fbe5417","Type":"ContainerDied","Data":"85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0"} Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.441006 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ft4fg" event={"ID":"c90559fe-83c6-4d44-ad19-205e7fbe5417","Type":"ContainerDied","Data":"55c141efcea31b245c6b790d00729b4c8f6b714b95a9fab9bb623e169e0f25ba"} Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.441031 4767 scope.go:117] "RemoveContainer" containerID="85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.440678 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ft4fg" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.476819 4767 scope.go:117] "RemoveContainer" containerID="27560c1c684ae13d694873a75be75b0cbc91c57b9e23a7aa173a779068c8710c" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.478571 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ft4fg"] Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.496982 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ft4fg"] Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.516260 4767 scope.go:117] "RemoveContainer" containerID="29bbe9af0207507ff9a544a912cf99af1af0c525c78eb3a85a3cae494fb3e237" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.574025 4767 scope.go:117] "RemoveContainer" containerID="85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0" Jan 28 19:35:38 crc kubenswrapper[4767]: E0128 19:35:38.574768 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0\": container with ID starting with 85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0 not found: ID does not exist" containerID="85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.574811 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0"} err="failed to get container status \"85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0\": rpc error: code = NotFound desc = could not find container \"85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0\": container with ID starting with 85803cfb8a103f03a80be625182d8a0d5f4c00ba510c479472517d75a6e089c0 not found: ID does not exist" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.574845 4767 scope.go:117] "RemoveContainer" containerID="27560c1c684ae13d694873a75be75b0cbc91c57b9e23a7aa173a779068c8710c" Jan 28 19:35:38 crc kubenswrapper[4767]: E0128 19:35:38.575171 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"27560c1c684ae13d694873a75be75b0cbc91c57b9e23a7aa173a779068c8710c\": container with ID starting with 27560c1c684ae13d694873a75be75b0cbc91c57b9e23a7aa173a779068c8710c not found: ID does not exist" containerID="27560c1c684ae13d694873a75be75b0cbc91c57b9e23a7aa173a779068c8710c" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.575220 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"27560c1c684ae13d694873a75be75b0cbc91c57b9e23a7aa173a779068c8710c"} err="failed to get container status \"27560c1c684ae13d694873a75be75b0cbc91c57b9e23a7aa173a779068c8710c\": rpc error: code = NotFound desc = could not find container \"27560c1c684ae13d694873a75be75b0cbc91c57b9e23a7aa173a779068c8710c\": container with ID starting with 27560c1c684ae13d694873a75be75b0cbc91c57b9e23a7aa173a779068c8710c not found: ID does not exist" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.575241 4767 scope.go:117] "RemoveContainer" containerID="29bbe9af0207507ff9a544a912cf99af1af0c525c78eb3a85a3cae494fb3e237" Jan 28 19:35:38 crc kubenswrapper[4767]: E0128 19:35:38.575547 4767 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"29bbe9af0207507ff9a544a912cf99af1af0c525c78eb3a85a3cae494fb3e237\": container with ID starting with 29bbe9af0207507ff9a544a912cf99af1af0c525c78eb3a85a3cae494fb3e237 not found: ID does not exist" containerID="29bbe9af0207507ff9a544a912cf99af1af0c525c78eb3a85a3cae494fb3e237" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.575584 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"29bbe9af0207507ff9a544a912cf99af1af0c525c78eb3a85a3cae494fb3e237"} err="failed to get container status \"29bbe9af0207507ff9a544a912cf99af1af0c525c78eb3a85a3cae494fb3e237\": rpc error: code = NotFound desc = could not find container \"29bbe9af0207507ff9a544a912cf99af1af0c525c78eb3a85a3cae494fb3e237\": container with ID starting with 29bbe9af0207507ff9a544a912cf99af1af0c525c78eb3a85a3cae494fb3e237 not found: ID does not exist" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.796105 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea" Jan 28 19:35:38 crc kubenswrapper[4767]: E0128 19:35:38.796618 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:35:38 crc kubenswrapper[4767]: I0128 19:35:38.807117 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c90559fe-83c6-4d44-ad19-205e7fbe5417" path="/var/lib/kubelet/pods/c90559fe-83c6-4d44-ad19-205e7fbe5417/volumes" Jan 28 19:35:42 crc kubenswrapper[4767]: I0128 19:35:42.484458 4767 generic.go:334] "Generic (PLEG): container finished" podID="57323335-588f-4faa-9dc4-dcaf2aa2b5f0" containerID="8d9c43a54f53f94336f43e5fec92793e394d4de85817a5b99bfc52c3fe8764d0" exitCode=0 Jan 28 19:35:42 crc kubenswrapper[4767]: I0128 19:35:42.484560 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"57323335-588f-4faa-9dc4-dcaf2aa2b5f0","Type":"ContainerDied","Data":"8d9c43a54f53f94336f43e5fec92793e394d4de85817a5b99bfc52c3fe8764d0"} Jan 28 19:35:43 crc kubenswrapper[4767]: I0128 19:35:43.500673 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"57323335-588f-4faa-9dc4-dcaf2aa2b5f0","Type":"ContainerStarted","Data":"4337949ef4f6f4dffbff66872bedb065d7c9081bb1361d20064ac1a25b55494b"} Jan 28 19:35:47 crc kubenswrapper[4767]: I0128 19:35:47.545201 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"57323335-588f-4faa-9dc4-dcaf2aa2b5f0","Type":"ContainerStarted","Data":"23b6b3c5d4eafc8741cccf710bf00ec8295f42282c8aaf302c4ac6973e7ae220"} Jan 28 19:35:47 crc kubenswrapper[4767]: I0128 19:35:47.548406 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/prometheus-metric-storage-0" event={"ID":"57323335-588f-4faa-9dc4-dcaf2aa2b5f0","Type":"ContainerStarted","Data":"83d1728009285eb22b36c388e05fb258965dea2cdea3724dbbf19ed3b71fb2ec"} Jan 28 19:35:47 crc kubenswrapper[4767]: I0128 19:35:47.572863 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/prometheus-metric-storage-0" 
podStartSLOduration=19.572844107999998 podStartE2EDuration="19.572844108s" podCreationTimestamp="2026-01-28 19:35:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-28 19:35:47.571754834 +0000 UTC m=+3953.535937718" watchObservedRunningTime="2026-01-28 19:35:47.572844108 +0000 UTC m=+3953.537026982" Jan 28 19:35:48 crc kubenswrapper[4767]: I0128 19:35:48.790523 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:50 crc kubenswrapper[4767]: I0128 19:35:50.796857 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea" Jan 28 19:35:50 crc kubenswrapper[4767]: E0128 19:35:50.797261 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:35:58 crc kubenswrapper[4767]: I0128 19:35:58.791544 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:58 crc kubenswrapper[4767]: I0128 19:35:58.812899 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/prometheus-metric-storage-0" Jan 28 19:35:59 crc kubenswrapper[4767]: I0128 19:35:59.676928 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/prometheus-metric-storage-0" Jan 28 19:36:04 crc kubenswrapper[4767]: I0128 19:36:04.796700 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea" Jan 28 19:36:04 crc kubenswrapper[4767]: E0128 19:36:04.797930 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:36:20 crc kubenswrapper[4767]: I0128 19:36:20.346401 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea" Jan 28 19:36:21 crc kubenswrapper[4767]: I0128 19:36:21.375671 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"7863ea4af4437efd3146e9c7a8845285fbe39a3db1b834dd9d43a44112bed4a1"} Jan 28 19:38:45 crc kubenswrapper[4767]: I0128 19:38:45.455540 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:38:45 crc kubenswrapper[4767]: I0128 19:38:45.456345 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" 
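The machine-config-daemon entries above show CrashLoopBackOff rejections ("back-off 5m0s") at 19:35:38, 19:35:50 and 19:36:04, then a successful RemoveContainer/ContainerStarted at 19:36:20-21 once the back-off expired. The 5m0s cap appears verbatim in the messages; the sketch below additionally assumes the commonly documented kubelet schedule (10s base, doubling per restart), which this log does not itself prove:

package main

import (
	"fmt"
	"time"
)

func main() {
	// 10s base and doubling are assumed defaults; 5m0s cap is from the log.
	delay, limit := 10*time.Second, 5*time.Minute
	for attempt := 1; attempt <= 7; attempt++ {
		fmt.Printf("restart %d: back-off %v\n", attempt, delay)
		if delay *= 2; delay > limit {
			delay = limit // 10s 20s 40s 1m20s 2m40s 5m 5m ...
		}
	}
}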
podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:39:15 crc kubenswrapper[4767]: I0128 19:39:15.455629 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:39:15 crc kubenswrapper[4767]: I0128 19:39:15.457670 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:39:25 crc kubenswrapper[4767]: I0128 19:39:25.778518 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-79f6f75b9c-dhf5c_0d7c50d3-1348-43e5-a8fa-f05cd53d2a42/manager/0.log" Jan 28 19:39:45 crc kubenswrapper[4767]: I0128 19:39:45.455479 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:39:45 crc kubenswrapper[4767]: I0128 19:39:45.456006 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:39:45 crc kubenswrapper[4767]: I0128 19:39:45.456054 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 19:39:45 crc kubenswrapper[4767]: I0128 19:39:45.456906 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7863ea4af4437efd3146e9c7a8845285fbe39a3db1b834dd9d43a44112bed4a1"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 19:39:45 crc kubenswrapper[4767]: I0128 19:39:45.456961 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" containerID="cri-o://7863ea4af4437efd3146e9c7a8845285fbe39a3db1b834dd9d43a44112bed4a1" gracePeriod=600 Jan 28 19:39:46 crc kubenswrapper[4767]: I0128 19:39:46.516338 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="7863ea4af4437efd3146e9c7a8845285fbe39a3db1b834dd9d43a44112bed4a1" exitCode=0 Jan 28 19:39:46 crc kubenswrapper[4767]: I0128 19:39:46.516501 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" 
event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"7863ea4af4437efd3146e9c7a8845285fbe39a3db1b834dd9d43a44112bed4a1"} Jan 28 19:39:46 crc kubenswrapper[4767]: I0128 19:39:46.516954 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3"} Jan 28 19:39:46 crc kubenswrapper[4767]: I0128 19:39:46.516978 4767 scope.go:117] "RemoveContainer" containerID="deeb78a4c6ed108a1176d871e4fdafdbebdfe4d2070459f661c8d1c06a61daea" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.445155 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6wdzt/must-gather-5mtn7"] Jan 28 19:39:51 crc kubenswrapper[4767]: E0128 19:39:51.446157 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c90559fe-83c6-4d44-ad19-205e7fbe5417" containerName="extract-utilities" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.446174 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c90559fe-83c6-4d44-ad19-205e7fbe5417" containerName="extract-utilities" Jan 28 19:39:51 crc kubenswrapper[4767]: E0128 19:39:51.446243 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c90559fe-83c6-4d44-ad19-205e7fbe5417" containerName="registry-server" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.446250 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c90559fe-83c6-4d44-ad19-205e7fbe5417" containerName="registry-server" Jan 28 19:39:51 crc kubenswrapper[4767]: E0128 19:39:51.446267 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c90559fe-83c6-4d44-ad19-205e7fbe5417" containerName="extract-content" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.446274 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="c90559fe-83c6-4d44-ad19-205e7fbe5417" containerName="extract-content" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.446496 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="c90559fe-83c6-4d44-ad19-205e7fbe5417" containerName="registry-server" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.447678 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6wdzt/must-gather-5mtn7" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.449823 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6wdzt"/"kube-root-ca.crt" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.450040 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-6wdzt"/"openshift-service-ca.crt" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.452215 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-6wdzt"/"default-dockercfg-2l7gj" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.474120 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6wdzt/must-gather-5mtn7"] Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.604362 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d-must-gather-output\") pod \"must-gather-5mtn7\" (UID: \"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d\") " pod="openshift-must-gather-6wdzt/must-gather-5mtn7" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.604827 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9qc9\" (UniqueName: \"kubernetes.io/projected/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d-kube-api-access-t9qc9\") pod \"must-gather-5mtn7\" (UID: \"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d\") " pod="openshift-must-gather-6wdzt/must-gather-5mtn7" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.706555 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d-must-gather-output\") pod \"must-gather-5mtn7\" (UID: \"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d\") " pod="openshift-must-gather-6wdzt/must-gather-5mtn7" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.706670 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9qc9\" (UniqueName: \"kubernetes.io/projected/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d-kube-api-access-t9qc9\") pod \"must-gather-5mtn7\" (UID: \"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d\") " pod="openshift-must-gather-6wdzt/must-gather-5mtn7" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.707799 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d-must-gather-output\") pod \"must-gather-5mtn7\" (UID: \"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d\") " pod="openshift-must-gather-6wdzt/must-gather-5mtn7" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.728393 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9qc9\" (UniqueName: \"kubernetes.io/projected/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d-kube-api-access-t9qc9\") pod \"must-gather-5mtn7\" (UID: \"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d\") " pod="openshift-must-gather-6wdzt/must-gather-5mtn7" Jan 28 19:39:51 crc kubenswrapper[4767]: I0128 19:39:51.767207 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6wdzt/must-gather-5mtn7" Jan 28 19:39:52 crc kubenswrapper[4767]: I0128 19:39:52.257866 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-6wdzt/must-gather-5mtn7"] Jan 28 19:39:52 crc kubenswrapper[4767]: I0128 19:39:52.658496 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 19:39:53 crc kubenswrapper[4767]: I0128 19:39:53.594299 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6wdzt/must-gather-5mtn7" event={"ID":"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d","Type":"ContainerStarted","Data":"c3b334ce41e2df603f929482558b3330fa26901347332f6ab4456b6d3b7bbbc7"} Jan 28 19:39:59 crc kubenswrapper[4767]: I0128 19:39:59.660624 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6wdzt/must-gather-5mtn7" event={"ID":"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d","Type":"ContainerStarted","Data":"b344962d9b555efb485357e2961f5bf20be8193df51b1e4958e13ace5611cee6"} Jan 28 19:39:59 crc kubenswrapper[4767]: I0128 19:39:59.661076 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6wdzt/must-gather-5mtn7" event={"ID":"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d","Type":"ContainerStarted","Data":"47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd"} Jan 28 19:39:59 crc kubenswrapper[4767]: I0128 19:39:59.684763 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6wdzt/must-gather-5mtn7" podStartSLOduration=2.848063904 podStartE2EDuration="8.684739425s" podCreationTimestamp="2026-01-28 19:39:51 +0000 UTC" firstStartedPulling="2026-01-28 19:39:52.65823469 +0000 UTC m=+4198.622417574" lastFinishedPulling="2026-01-28 19:39:58.494910221 +0000 UTC m=+4204.459093095" observedRunningTime="2026-01-28 19:39:59.672915352 +0000 UTC m=+4205.637098226" watchObservedRunningTime="2026-01-28 19:39:59.684739425 +0000 UTC m=+4205.648922299" Jan 28 19:40:04 crc kubenswrapper[4767]: I0128 19:40:04.642770 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6wdzt/crc-debug-4q98g"] Jan 28 19:40:04 crc kubenswrapper[4767]: I0128 19:40:04.644825 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6wdzt/crc-debug-4q98g" Jan 28 19:40:04 crc kubenswrapper[4767]: I0128 19:40:04.710762 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c-host\") pod \"crc-debug-4q98g\" (UID: \"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c\") " pod="openshift-must-gather-6wdzt/crc-debug-4q98g" Jan 28 19:40:04 crc kubenswrapper[4767]: I0128 19:40:04.710850 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2ksfp\" (UniqueName: \"kubernetes.io/projected/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c-kube-api-access-2ksfp\") pod \"crc-debug-4q98g\" (UID: \"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c\") " pod="openshift-must-gather-6wdzt/crc-debug-4q98g" Jan 28 19:40:04 crc kubenswrapper[4767]: I0128 19:40:04.813554 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c-host\") pod \"crc-debug-4q98g\" (UID: \"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c\") " pod="openshift-must-gather-6wdzt/crc-debug-4q98g" Jan 28 19:40:04 crc kubenswrapper[4767]: I0128 19:40:04.813658 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2ksfp\" (UniqueName: \"kubernetes.io/projected/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c-kube-api-access-2ksfp\") pod \"crc-debug-4q98g\" (UID: \"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c\") " pod="openshift-must-gather-6wdzt/crc-debug-4q98g" Jan 28 19:40:04 crc kubenswrapper[4767]: I0128 19:40:04.813765 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c-host\") pod \"crc-debug-4q98g\" (UID: \"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c\") " pod="openshift-must-gather-6wdzt/crc-debug-4q98g" Jan 28 19:40:04 crc kubenswrapper[4767]: I0128 19:40:04.837031 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2ksfp\" (UniqueName: \"kubernetes.io/projected/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c-kube-api-access-2ksfp\") pod \"crc-debug-4q98g\" (UID: \"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c\") " pod="openshift-must-gather-6wdzt/crc-debug-4q98g" Jan 28 19:40:04 crc kubenswrapper[4767]: I0128 19:40:04.965145 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6wdzt/crc-debug-4q98g" Jan 28 19:40:05 crc kubenswrapper[4767]: I0128 19:40:05.716221 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6wdzt/crc-debug-4q98g" event={"ID":"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c","Type":"ContainerStarted","Data":"6847bc2ef1db229a6774023e564d27f74eaa313f85dda85092aedf54e4e7b07f"} Jan 28 19:40:17 crc kubenswrapper[4767]: I0128 19:40:17.855882 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6wdzt/crc-debug-4q98g" event={"ID":"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c","Type":"ContainerStarted","Data":"8017a0e4c8257740a75df642381d39753cbcd47e380c088d0945dcd04c3d6799"} Jan 28 19:40:17 crc kubenswrapper[4767]: I0128 19:40:17.871577 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-6wdzt/crc-debug-4q98g" podStartSLOduration=2.398211877 podStartE2EDuration="13.871551682s" podCreationTimestamp="2026-01-28 19:40:04 +0000 UTC" firstStartedPulling="2026-01-28 19:40:05.455020664 +0000 UTC m=+4211.419203538" lastFinishedPulling="2026-01-28 19:40:16.928360469 +0000 UTC m=+4222.892543343" observedRunningTime="2026-01-28 19:40:17.867816215 +0000 UTC m=+4223.831999119" watchObservedRunningTime="2026-01-28 19:40:17.871551682 +0000 UTC m=+4223.835734556" Jan 28 19:40:29 crc kubenswrapper[4767]: I0128 19:40:29.676442 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bdw5p"] Jan 28 19:40:29 crc kubenswrapper[4767]: I0128 19:40:29.682265 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:29 crc kubenswrapper[4767]: I0128 19:40:29.695012 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bdw5p"] Jan 28 19:40:29 crc kubenswrapper[4767]: I0128 19:40:29.748443 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d61c67f5-a051-422e-adb7-8f6769e3bae5-catalog-content\") pod \"community-operators-bdw5p\" (UID: \"d61c67f5-a051-422e-adb7-8f6769e3bae5\") " pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:29 crc kubenswrapper[4767]: I0128 19:40:29.748948 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d61c67f5-a051-422e-adb7-8f6769e3bae5-utilities\") pod \"community-operators-bdw5p\" (UID: \"d61c67f5-a051-422e-adb7-8f6769e3bae5\") " pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:29 crc kubenswrapper[4767]: I0128 19:40:29.749158 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-46wh6\" (UniqueName: \"kubernetes.io/projected/d61c67f5-a051-422e-adb7-8f6769e3bae5-kube-api-access-46wh6\") pod \"community-operators-bdw5p\" (UID: \"d61c67f5-a051-422e-adb7-8f6769e3bae5\") " pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:29 crc kubenswrapper[4767]: I0128 19:40:29.850740 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d61c67f5-a051-422e-adb7-8f6769e3bae5-utilities\") pod \"community-operators-bdw5p\" (UID: \"d61c67f5-a051-422e-adb7-8f6769e3bae5\") " pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:29 crc 
kubenswrapper[4767]: I0128 19:40:29.850870 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-46wh6\" (UniqueName: \"kubernetes.io/projected/d61c67f5-a051-422e-adb7-8f6769e3bae5-kube-api-access-46wh6\") pod \"community-operators-bdw5p\" (UID: \"d61c67f5-a051-422e-adb7-8f6769e3bae5\") " pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:29 crc kubenswrapper[4767]: I0128 19:40:29.850950 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d61c67f5-a051-422e-adb7-8f6769e3bae5-catalog-content\") pod \"community-operators-bdw5p\" (UID: \"d61c67f5-a051-422e-adb7-8f6769e3bae5\") " pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:29 crc kubenswrapper[4767]: I0128 19:40:29.851447 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d61c67f5-a051-422e-adb7-8f6769e3bae5-catalog-content\") pod \"community-operators-bdw5p\" (UID: \"d61c67f5-a051-422e-adb7-8f6769e3bae5\") " pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:29 crc kubenswrapper[4767]: I0128 19:40:29.851553 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d61c67f5-a051-422e-adb7-8f6769e3bae5-utilities\") pod \"community-operators-bdw5p\" (UID: \"d61c67f5-a051-422e-adb7-8f6769e3bae5\") " pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:29 crc kubenswrapper[4767]: I0128 19:40:29.881116 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-46wh6\" (UniqueName: \"kubernetes.io/projected/d61c67f5-a051-422e-adb7-8f6769e3bae5-kube-api-access-46wh6\") pod \"community-operators-bdw5p\" (UID: \"d61c67f5-a051-422e-adb7-8f6769e3bae5\") " pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:30 crc kubenswrapper[4767]: I0128 19:40:30.000941 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:30 crc kubenswrapper[4767]: I0128 19:40:30.582346 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bdw5p"] Jan 28 19:40:30 crc kubenswrapper[4767]: I0128 19:40:30.977808 4767 generic.go:334] "Generic (PLEG): container finished" podID="d61c67f5-a051-422e-adb7-8f6769e3bae5" containerID="f0290a339552d020f7cad58377aeb6099edd5fd01a4b51cb403f97debae96ee2" exitCode=0 Jan 28 19:40:30 crc kubenswrapper[4767]: I0128 19:40:30.977992 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bdw5p" event={"ID":"d61c67f5-a051-422e-adb7-8f6769e3bae5","Type":"ContainerDied","Data":"f0290a339552d020f7cad58377aeb6099edd5fd01a4b51cb403f97debae96ee2"} Jan 28 19:40:30 crc kubenswrapper[4767]: I0128 19:40:30.978135 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bdw5p" event={"ID":"d61c67f5-a051-422e-adb7-8f6769e3bae5","Type":"ContainerStarted","Data":"5be9975b169c6246e3631a85eb969d7198911deb0a81604073751c3ad0bb5344"} Jan 28 19:40:32 crc kubenswrapper[4767]: I0128 19:40:32.997912 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bdw5p" event={"ID":"d61c67f5-a051-422e-adb7-8f6769e3bae5","Type":"ContainerStarted","Data":"a72d5833533c4d44326adb733dc79a861cf733bc327a5901997adb8a939131bb"} Jan 28 19:40:34 crc kubenswrapper[4767]: I0128 19:40:34.009777 4767 generic.go:334] "Generic (PLEG): container finished" podID="d61c67f5-a051-422e-adb7-8f6769e3bae5" containerID="a72d5833533c4d44326adb733dc79a861cf733bc327a5901997adb8a939131bb" exitCode=0 Jan 28 19:40:34 crc kubenswrapper[4767]: I0128 19:40:34.009848 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bdw5p" event={"ID":"d61c67f5-a051-422e-adb7-8f6769e3bae5","Type":"ContainerDied","Data":"a72d5833533c4d44326adb733dc79a861cf733bc327a5901997adb8a939131bb"} Jan 28 19:40:35 crc kubenswrapper[4767]: I0128 19:40:35.019857 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bdw5p" event={"ID":"d61c67f5-a051-422e-adb7-8f6769e3bae5","Type":"ContainerStarted","Data":"e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d"} Jan 28 19:40:35 crc kubenswrapper[4767]: I0128 19:40:35.048035 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bdw5p" podStartSLOduration=2.6402125400000003 podStartE2EDuration="6.048012694s" podCreationTimestamp="2026-01-28 19:40:29 +0000 UTC" firstStartedPulling="2026-01-28 19:40:30.979762496 +0000 UTC m=+4236.943945370" lastFinishedPulling="2026-01-28 19:40:34.38756265 +0000 UTC m=+4240.351745524" observedRunningTime="2026-01-28 19:40:35.042098218 +0000 UTC m=+4241.006281092" watchObservedRunningTime="2026-01-28 19:40:35.048012694 +0000 UTC m=+4241.012195568" Jan 28 19:40:39 crc kubenswrapper[4767]: I0128 19:40:39.065289 4767 generic.go:334] "Generic (PLEG): container finished" podID="2a8f554e-5ef5-49cb-bd09-a3b7f041e95c" containerID="8017a0e4c8257740a75df642381d39753cbcd47e380c088d0945dcd04c3d6799" exitCode=0 Jan 28 19:40:39 crc kubenswrapper[4767]: I0128 19:40:39.065369 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6wdzt/crc-debug-4q98g" 
event={"ID":"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c","Type":"ContainerDied","Data":"8017a0e4c8257740a75df642381d39753cbcd47e380c088d0945dcd04c3d6799"} Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.001113 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.001508 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.056149 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.134959 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.204891 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6wdzt/crc-debug-4q98g" Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.235982 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6wdzt/crc-debug-4q98g"] Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.244976 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6wdzt/crc-debug-4q98g"] Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.288661 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2ksfp\" (UniqueName: \"kubernetes.io/projected/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c-kube-api-access-2ksfp\") pod \"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c\" (UID: \"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c\") " Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.288826 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c-host\") pod \"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c\" (UID: \"2a8f554e-5ef5-49cb-bd09-a3b7f041e95c\") " Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.288966 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c-host" (OuterVolumeSpecName: "host") pod "2a8f554e-5ef5-49cb-bd09-a3b7f041e95c" (UID: "2a8f554e-5ef5-49cb-bd09-a3b7f041e95c"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.289362 4767 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c-host\") on node \"crc\" DevicePath \"\"" Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.292900 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bdw5p"] Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.295346 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c-kube-api-access-2ksfp" (OuterVolumeSpecName: "kube-api-access-2ksfp") pod "2a8f554e-5ef5-49cb-bd09-a3b7f041e95c" (UID: "2a8f554e-5ef5-49cb-bd09-a3b7f041e95c"). InnerVolumeSpecName "kube-api-access-2ksfp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.391438 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2ksfp\" (UniqueName: \"kubernetes.io/projected/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c-kube-api-access-2ksfp\") on node \"crc\" DevicePath \"\"" Jan 28 19:40:40 crc kubenswrapper[4767]: I0128 19:40:40.808618 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2a8f554e-5ef5-49cb-bd09-a3b7f041e95c" path="/var/lib/kubelet/pods/2a8f554e-5ef5-49cb-bd09-a3b7f041e95c/volumes" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.083555 4767 scope.go:117] "RemoveContainer" containerID="8017a0e4c8257740a75df642381d39753cbcd47e380c088d0945dcd04c3d6799" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.083572 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6wdzt/crc-debug-4q98g" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.524621 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-6wdzt/crc-debug-k2z62"] Jan 28 19:40:41 crc kubenswrapper[4767]: E0128 19:40:41.525271 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a8f554e-5ef5-49cb-bd09-a3b7f041e95c" containerName="container-00" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.525285 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a8f554e-5ef5-49cb-bd09-a3b7f041e95c" containerName="container-00" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.525485 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a8f554e-5ef5-49cb-bd09-a3b7f041e95c" containerName="container-00" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.526173 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6wdzt/crc-debug-k2z62" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.621375 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/730da748-48cb-4d3e-9b3a-d5a3556f8254-host\") pod \"crc-debug-k2z62\" (UID: \"730da748-48cb-4d3e-9b3a-d5a3556f8254\") " pod="openshift-must-gather-6wdzt/crc-debug-k2z62" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.621432 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnxg6\" (UniqueName: \"kubernetes.io/projected/730da748-48cb-4d3e-9b3a-d5a3556f8254-kube-api-access-hnxg6\") pod \"crc-debug-k2z62\" (UID: \"730da748-48cb-4d3e-9b3a-d5a3556f8254\") " pod="openshift-must-gather-6wdzt/crc-debug-k2z62" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.723971 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/730da748-48cb-4d3e-9b3a-d5a3556f8254-host\") pod \"crc-debug-k2z62\" (UID: \"730da748-48cb-4d3e-9b3a-d5a3556f8254\") " pod="openshift-must-gather-6wdzt/crc-debug-k2z62" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.724055 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnxg6\" (UniqueName: \"kubernetes.io/projected/730da748-48cb-4d3e-9b3a-d5a3556f8254-kube-api-access-hnxg6\") pod \"crc-debug-k2z62\" (UID: \"730da748-48cb-4d3e-9b3a-d5a3556f8254\") " pod="openshift-must-gather-6wdzt/crc-debug-k2z62" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.724530 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/730da748-48cb-4d3e-9b3a-d5a3556f8254-host\") pod \"crc-debug-k2z62\" (UID: \"730da748-48cb-4d3e-9b3a-d5a3556f8254\") " pod="openshift-must-gather-6wdzt/crc-debug-k2z62" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.743198 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnxg6\" (UniqueName: \"kubernetes.io/projected/730da748-48cb-4d3e-9b3a-d5a3556f8254-kube-api-access-hnxg6\") pod \"crc-debug-k2z62\" (UID: \"730da748-48cb-4d3e-9b3a-d5a3556f8254\") " pod="openshift-must-gather-6wdzt/crc-debug-k2z62" Jan 28 19:40:41 crc kubenswrapper[4767]: I0128 19:40:41.844818 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6wdzt/crc-debug-k2z62" Jan 28 19:40:42 crc kubenswrapper[4767]: I0128 19:40:42.094912 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6wdzt/crc-debug-k2z62" event={"ID":"730da748-48cb-4d3e-9b3a-d5a3556f8254","Type":"ContainerStarted","Data":"7efef395a6b6a8b9c71ec0c99cf5bf7b7760476c97fbbcac73b33ab5a642adc3"} Jan 28 19:40:42 crc kubenswrapper[4767]: I0128 19:40:42.095072 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bdw5p" podUID="d61c67f5-a051-422e-adb7-8f6769e3bae5" containerName="registry-server" containerID="cri-o://e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d" gracePeriod=2 Jan 28 19:40:42 crc kubenswrapper[4767]: I0128 19:40:42.570239 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:42 crc kubenswrapper[4767]: I0128 19:40:42.643634 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-46wh6\" (UniqueName: \"kubernetes.io/projected/d61c67f5-a051-422e-adb7-8f6769e3bae5-kube-api-access-46wh6\") pod \"d61c67f5-a051-422e-adb7-8f6769e3bae5\" (UID: \"d61c67f5-a051-422e-adb7-8f6769e3bae5\") " Jan 28 19:40:42 crc kubenswrapper[4767]: I0128 19:40:42.643897 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d61c67f5-a051-422e-adb7-8f6769e3bae5-utilities\") pod \"d61c67f5-a051-422e-adb7-8f6769e3bae5\" (UID: \"d61c67f5-a051-422e-adb7-8f6769e3bae5\") " Jan 28 19:40:42 crc kubenswrapper[4767]: I0128 19:40:42.644000 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d61c67f5-a051-422e-adb7-8f6769e3bae5-catalog-content\") pod \"d61c67f5-a051-422e-adb7-8f6769e3bae5\" (UID: \"d61c67f5-a051-422e-adb7-8f6769e3bae5\") " Jan 28 19:40:42 crc kubenswrapper[4767]: I0128 19:40:42.644993 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d61c67f5-a051-422e-adb7-8f6769e3bae5-utilities" (OuterVolumeSpecName: "utilities") pod "d61c67f5-a051-422e-adb7-8f6769e3bae5" (UID: "d61c67f5-a051-422e-adb7-8f6769e3bae5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:40:42 crc kubenswrapper[4767]: I0128 19:40:42.650049 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d61c67f5-a051-422e-adb7-8f6769e3bae5-kube-api-access-46wh6" (OuterVolumeSpecName: "kube-api-access-46wh6") pod "d61c67f5-a051-422e-adb7-8f6769e3bae5" (UID: "d61c67f5-a051-422e-adb7-8f6769e3bae5"). InnerVolumeSpecName "kube-api-access-46wh6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:40:42 crc kubenswrapper[4767]: I0128 19:40:42.720351 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d61c67f5-a051-422e-adb7-8f6769e3bae5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d61c67f5-a051-422e-adb7-8f6769e3bae5" (UID: "d61c67f5-a051-422e-adb7-8f6769e3bae5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:40:42 crc kubenswrapper[4767]: I0128 19:40:42.747024 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d61c67f5-a051-422e-adb7-8f6769e3bae5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:40:42 crc kubenswrapper[4767]: I0128 19:40:42.747070 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-46wh6\" (UniqueName: \"kubernetes.io/projected/d61c67f5-a051-422e-adb7-8f6769e3bae5-kube-api-access-46wh6\") on node \"crc\" DevicePath \"\"" Jan 28 19:40:42 crc kubenswrapper[4767]: I0128 19:40:42.747080 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d61c67f5-a051-422e-adb7-8f6769e3bae5-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.108427 4767 generic.go:334] "Generic (PLEG): container finished" podID="d61c67f5-a051-422e-adb7-8f6769e3bae5" containerID="e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d" exitCode=0 Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.108560 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bdw5p" Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.108564 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bdw5p" event={"ID":"d61c67f5-a051-422e-adb7-8f6769e3bae5","Type":"ContainerDied","Data":"e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d"} Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.108665 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bdw5p" event={"ID":"d61c67f5-a051-422e-adb7-8f6769e3bae5","Type":"ContainerDied","Data":"5be9975b169c6246e3631a85eb969d7198911deb0a81604073751c3ad0bb5344"} Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.108692 4767 scope.go:117] "RemoveContainer" containerID="e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d" Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.111426 4767 generic.go:334] "Generic (PLEG): container finished" podID="730da748-48cb-4d3e-9b3a-d5a3556f8254" containerID="9014a9d0e3f64ff993502ce172092310574971d569cf2c38ef6595750f4c9d5e" exitCode=1 Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.111561 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6wdzt/crc-debug-k2z62" event={"ID":"730da748-48cb-4d3e-9b3a-d5a3556f8254","Type":"ContainerDied","Data":"9014a9d0e3f64ff993502ce172092310574971d569cf2c38ef6595750f4c9d5e"} Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.139600 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bdw5p"] Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.147520 4767 scope.go:117] "RemoveContainer" containerID="a72d5833533c4d44326adb733dc79a861cf733bc327a5901997adb8a939131bb" Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.153304 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bdw5p"] Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.195263 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6wdzt/crc-debug-k2z62"] Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.198245 4767 scope.go:117] "RemoveContainer" 
containerID="f0290a339552d020f7cad58377aeb6099edd5fd01a4b51cb403f97debae96ee2" Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.209393 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6wdzt/crc-debug-k2z62"] Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.250967 4767 scope.go:117] "RemoveContainer" containerID="e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d" Jan 28 19:40:43 crc kubenswrapper[4767]: E0128 19:40:43.251606 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d\": container with ID starting with e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d not found: ID does not exist" containerID="e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d" Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.251661 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d"} err="failed to get container status \"e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d\": rpc error: code = NotFound desc = could not find container \"e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d\": container with ID starting with e04c2005b6eec834fb13f8f8686eee51dbee58b37c6449104e0e7ba9e0b4b00d not found: ID does not exist" Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.251695 4767 scope.go:117] "RemoveContainer" containerID="a72d5833533c4d44326adb733dc79a861cf733bc327a5901997adb8a939131bb" Jan 28 19:40:43 crc kubenswrapper[4767]: E0128 19:40:43.252648 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a72d5833533c4d44326adb733dc79a861cf733bc327a5901997adb8a939131bb\": container with ID starting with a72d5833533c4d44326adb733dc79a861cf733bc327a5901997adb8a939131bb not found: ID does not exist" containerID="a72d5833533c4d44326adb733dc79a861cf733bc327a5901997adb8a939131bb" Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.252684 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a72d5833533c4d44326adb733dc79a861cf733bc327a5901997adb8a939131bb"} err="failed to get container status \"a72d5833533c4d44326adb733dc79a861cf733bc327a5901997adb8a939131bb\": rpc error: code = NotFound desc = could not find container \"a72d5833533c4d44326adb733dc79a861cf733bc327a5901997adb8a939131bb\": container with ID starting with a72d5833533c4d44326adb733dc79a861cf733bc327a5901997adb8a939131bb not found: ID does not exist" Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.252705 4767 scope.go:117] "RemoveContainer" containerID="f0290a339552d020f7cad58377aeb6099edd5fd01a4b51cb403f97debae96ee2" Jan 28 19:40:43 crc kubenswrapper[4767]: E0128 19:40:43.260898 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0290a339552d020f7cad58377aeb6099edd5fd01a4b51cb403f97debae96ee2\": container with ID starting with f0290a339552d020f7cad58377aeb6099edd5fd01a4b51cb403f97debae96ee2 not found: ID does not exist" containerID="f0290a339552d020f7cad58377aeb6099edd5fd01a4b51cb403f97debae96ee2" Jan 28 19:40:43 crc kubenswrapper[4767]: I0128 19:40:43.260952 4767 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"f0290a339552d020f7cad58377aeb6099edd5fd01a4b51cb403f97debae96ee2"} err="failed to get container status \"f0290a339552d020f7cad58377aeb6099edd5fd01a4b51cb403f97debae96ee2\": rpc error: code = NotFound desc = could not find container \"f0290a339552d020f7cad58377aeb6099edd5fd01a4b51cb403f97debae96ee2\": container with ID starting with f0290a339552d020f7cad58377aeb6099edd5fd01a4b51cb403f97debae96ee2 not found: ID does not exist" Jan 28 19:40:44 crc kubenswrapper[4767]: I0128 19:40:44.223798 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6wdzt/crc-debug-k2z62" Jan 28 19:40:44 crc kubenswrapper[4767]: I0128 19:40:44.280752 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnxg6\" (UniqueName: \"kubernetes.io/projected/730da748-48cb-4d3e-9b3a-d5a3556f8254-kube-api-access-hnxg6\") pod \"730da748-48cb-4d3e-9b3a-d5a3556f8254\" (UID: \"730da748-48cb-4d3e-9b3a-d5a3556f8254\") " Jan 28 19:40:44 crc kubenswrapper[4767]: I0128 19:40:44.281032 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/730da748-48cb-4d3e-9b3a-d5a3556f8254-host\") pod \"730da748-48cb-4d3e-9b3a-d5a3556f8254\" (UID: \"730da748-48cb-4d3e-9b3a-d5a3556f8254\") " Jan 28 19:40:44 crc kubenswrapper[4767]: I0128 19:40:44.281655 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/730da748-48cb-4d3e-9b3a-d5a3556f8254-host" (OuterVolumeSpecName: "host") pod "730da748-48cb-4d3e-9b3a-d5a3556f8254" (UID: "730da748-48cb-4d3e-9b3a-d5a3556f8254"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 28 19:40:44 crc kubenswrapper[4767]: I0128 19:40:44.292400 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/730da748-48cb-4d3e-9b3a-d5a3556f8254-kube-api-access-hnxg6" (OuterVolumeSpecName: "kube-api-access-hnxg6") pod "730da748-48cb-4d3e-9b3a-d5a3556f8254" (UID: "730da748-48cb-4d3e-9b3a-d5a3556f8254"). InnerVolumeSpecName "kube-api-access-hnxg6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:40:44 crc kubenswrapper[4767]: I0128 19:40:44.383337 4767 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/730da748-48cb-4d3e-9b3a-d5a3556f8254-host\") on node \"crc\" DevicePath \"\"" Jan 28 19:40:44 crc kubenswrapper[4767]: I0128 19:40:44.383375 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnxg6\" (UniqueName: \"kubernetes.io/projected/730da748-48cb-4d3e-9b3a-d5a3556f8254-kube-api-access-hnxg6\") on node \"crc\" DevicePath \"\"" Jan 28 19:40:44 crc kubenswrapper[4767]: I0128 19:40:44.808650 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="730da748-48cb-4d3e-9b3a-d5a3556f8254" path="/var/lib/kubelet/pods/730da748-48cb-4d3e-9b3a-d5a3556f8254/volumes" Jan 28 19:40:44 crc kubenswrapper[4767]: I0128 19:40:44.809391 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d61c67f5-a051-422e-adb7-8f6769e3bae5" path="/var/lib/kubelet/pods/d61c67f5-a051-422e-adb7-8f6769e3bae5/volumes" Jan 28 19:40:45 crc kubenswrapper[4767]: I0128 19:40:45.133128 4767 scope.go:117] "RemoveContainer" containerID="9014a9d0e3f64ff993502ce172092310574971d569cf2c38ef6595750f4c9d5e" Jan 28 19:40:45 crc kubenswrapper[4767]: I0128 19:40:45.133181 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6wdzt/crc-debug-k2z62" Jan 28 19:40:54 crc kubenswrapper[4767]: I0128 19:40:54.389028 4767 scope.go:117] "RemoveContainer" containerID="955903ceb671f87018620ac77fd3cb39a8ceaa17fec0ee6db461e8fac14d25b4" Jan 28 19:40:54 crc kubenswrapper[4767]: I0128 19:40:54.412101 4767 scope.go:117] "RemoveContainer" containerID="5d8db8739f105033858e4b8b192f22a69104fda4ff88d2d2ef102bf79a536fef" Jan 28 19:40:54 crc kubenswrapper[4767]: I0128 19:40:54.458519 4767 scope.go:117] "RemoveContainer" containerID="afb19b3f752cf0a8d3c26c0e4a666bb6d5e8a95e20c9a7609b0b74b5722c9a82" Jan 28 19:41:43 crc kubenswrapper[4767]: I0128 19:41:43.535747 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_24ff0e29-e1a3-43b3-8fc1-1d9e46fae486/init-config-reloader/0.log" Jan 28 19:41:43 crc kubenswrapper[4767]: I0128 19:41:43.740372 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_24ff0e29-e1a3-43b3-8fc1-1d9e46fae486/alertmanager/0.log" Jan 28 19:41:43 crc kubenswrapper[4767]: I0128 19:41:43.754070 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_24ff0e29-e1a3-43b3-8fc1-1d9e46fae486/init-config-reloader/0.log" Jan 28 19:41:43 crc kubenswrapper[4767]: I0128 19:41:43.757554 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_alertmanager-metric-storage-0_24ff0e29-e1a3-43b3-8fc1-1d9e46fae486/config-reloader/0.log" Jan 28 19:41:43 crc kubenswrapper[4767]: I0128 19:41:43.927926 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_5367cd99-a28a-4678-9add-df5bf4e069fb/aodh-api/0.log" Jan 28 19:41:43 crc kubenswrapper[4767]: I0128 19:41:43.953867 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_5367cd99-a28a-4678-9add-df5bf4e069fb/aodh-evaluator/0.log" Jan 28 19:41:43 crc kubenswrapper[4767]: I0128 19:41:43.966847 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_5367cd99-a28a-4678-9add-df5bf4e069fb/aodh-listener/0.log" Jan 28 19:41:44 crc kubenswrapper[4767]: 
I0128 19:41:44.021918 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_aodh-0_5367cd99-a28a-4678-9add-df5bf4e069fb/aodh-notifier/0.log" Jan 28 19:41:44 crc kubenswrapper[4767]: I0128 19:41:44.158993 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-df7856cf4-bs48j_eaabcf8d-9a71-4e3e-91d2-1360f1f16b81/barbican-api/0.log" Jan 28 19:41:44 crc kubenswrapper[4767]: I0128 19:41:44.192471 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-df7856cf4-bs48j_eaabcf8d-9a71-4e3e-91d2-1360f1f16b81/barbican-api-log/0.log" Jan 28 19:41:44 crc kubenswrapper[4767]: I0128 19:41:44.373965 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6c5d6855b6-2h826_0fb4e951-e65c-46da-9fa1-d710799db1ff/barbican-keystone-listener/0.log" Jan 28 19:41:44 crc kubenswrapper[4767]: I0128 19:41:44.430651 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6c5d6855b6-2h826_0fb4e951-e65c-46da-9fa1-d710799db1ff/barbican-keystone-listener-log/0.log" Jan 28 19:41:44 crc kubenswrapper[4767]: I0128 19:41:44.493918 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7c6ddf6ccf-ncm6l_73ac02aa-a281-41be-8b19-b7171b2d7522/barbican-worker/0.log" Jan 28 19:41:44 crc kubenswrapper[4767]: I0128 19:41:44.630884 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7c6ddf6ccf-ncm6l_73ac02aa-a281-41be-8b19-b7171b2d7522/barbican-worker-log/0.log" Jan 28 19:41:44 crc kubenswrapper[4767]: I0128 19:41:44.750669 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-6bwph_7859a3a8-d6ca-41b4-98f6-9561f839948a/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:44 crc kubenswrapper[4767]: I0128 19:41:44.857427 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8/ceilometer-central-agent/0.log" Jan 28 19:41:44 crc kubenswrapper[4767]: I0128 19:41:44.955542 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8/ceilometer-notification-agent/0.log" Jan 28 19:41:44 crc kubenswrapper[4767]: I0128 19:41:44.959067 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8/proxy-httpd/0.log" Jan 28 19:41:45 crc kubenswrapper[4767]: I0128 19:41:45.015964 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_6c0d6e5e-3e93-43a7-9211-7ab997c8c3e8/sg-core/0.log" Jan 28 19:41:45 crc kubenswrapper[4767]: I0128 19:41:45.203909 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_f6eb637e-af35-4164-bf33-abd4c7049906/cinder-api-log/0.log" Jan 28 19:41:45 crc kubenswrapper[4767]: I0128 19:41:45.240175 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_f6eb637e-af35-4164-bf33-abd4c7049906/cinder-api/0.log" Jan 28 19:41:45 crc kubenswrapper[4767]: I0128 19:41:45.393661 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_df31a172-a9c6-46bc-a327-03ef85482e5c/cinder-scheduler/0.log" Jan 28 19:41:45 crc kubenswrapper[4767]: I0128 19:41:45.454785 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: 
Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:41:45 crc kubenswrapper[4767]: I0128 19:41:45.454848 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:41:45 crc kubenswrapper[4767]: I0128 19:41:45.462084 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_df31a172-a9c6-46bc-a327-03ef85482e5c/probe/0.log" Jan 28 19:41:45 crc kubenswrapper[4767]: I0128 19:41:45.572174 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-q8h4q_272b34a9-db41-4ec5-ab16-10a08a84bd34/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:45 crc kubenswrapper[4767]: I0128 19:41:45.714741 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-9hxsx_7f4f5d75-649d-4cc7-840a-cfeb51c9f9d8/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:45 crc kubenswrapper[4767]: I0128 19:41:45.800190 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d75f767dc-pjf4k_854fd133-dba5-4457-9017-099a3eacd827/init/0.log" Jan 28 19:41:45 crc kubenswrapper[4767]: I0128 19:41:45.940175 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d75f767dc-pjf4k_854fd133-dba5-4457-9017-099a3eacd827/init/0.log" Jan 28 19:41:46 crc kubenswrapper[4767]: I0128 19:41:46.026487 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d75f767dc-pjf4k_854fd133-dba5-4457-9017-099a3eacd827/dnsmasq-dns/0.log" Jan 28 19:41:46 crc kubenswrapper[4767]: I0128 19:41:46.034783 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-tp4m6_1ebe359f-1b9c-4278-9c6c-5c72cf619080/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:46 crc kubenswrapper[4767]: I0128 19:41:46.275623 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_a8ec04a5-bc34-4006-b761-97437b8e5687/glance-log/0.log" Jan 28 19:41:46 crc kubenswrapper[4767]: I0128 19:41:46.281570 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_a8ec04a5-bc34-4006-b761-97437b8e5687/glance-httpd/0.log" Jan 28 19:41:46 crc kubenswrapper[4767]: I0128 19:41:46.412001 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6/glance-httpd/0.log" Jan 28 19:41:46 crc kubenswrapper[4767]: I0128 19:41:46.481177 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_fafc0c6b-d6be-4600-a3d1-fdc4bfec69a6/glance-log/0.log" Jan 28 19:41:46 crc kubenswrapper[4767]: I0128 19:41:46.885129 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-api-68db597dbc-btcng_1ace2f10-9cec-4091-a68f-7680d0f282fc/heat-api/0.log" Jan 28 19:41:47 crc kubenswrapper[4767]: I0128 19:41:47.209111 4767 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_heat-engine-75f5487b88-96qvr_79f79643-bd94-43a4-9be8-98513b220314/heat-engine/0.log" Jan 28 19:41:47 crc kubenswrapper[4767]: I0128 19:41:47.232856 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_heat-cfnapi-7fb965dcbf-xj95b_0b974209-d851-443b-88b4-868e5564e0fb/heat-cfnapi/0.log" Jan 28 19:41:47 crc kubenswrapper[4767]: I0128 19:41:47.470718 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-846wx_a58fe523-7845-4753-8549-c70919ee390b/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:47 crc kubenswrapper[4767]: I0128 19:41:47.494807 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-sb8nf_b130c420-d8a7-4063-a098-4b16682078be/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:47 crc kubenswrapper[4767]: I0128 19:41:47.770489 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29493781-88g67_ffabd2f8-aeec-4a2e-a4d7-317682cbe000/keystone-cron/0.log" Jan 28 19:41:47 crc kubenswrapper[4767]: I0128 19:41:47.782791 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_df92d3bd-ff9c-4df6-b783-7488249daa20/kube-state-metrics/0.log" Jan 28 19:41:47 crc kubenswrapper[4767]: I0128 19:41:47.793680 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-58b8dff7b-297q9_0784a151-311d-42e2-b27d-3a1ce38e28d9/keystone-api/0.log" Jan 28 19:41:48 crc kubenswrapper[4767]: I0128 19:41:48.070067 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-nkz64_b9edc2ac-e23a-459d-a2e6-0ca0a4f6cc5a/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:48 crc kubenswrapper[4767]: I0128 19:41:48.773686 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-678958868f-sc9dm_62702b24-3b22-4f62-8c70-e14a8749be55/neutron-httpd/0.log" Jan 28 19:41:48 crc kubenswrapper[4767]: I0128 19:41:48.811379 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-678958868f-sc9dm_62702b24-3b22-4f62-8c70-e14a8749be55/neutron-api/0.log" Jan 28 19:41:48 crc kubenswrapper[4767]: I0128 19:41:48.840173 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-cx5tv_485ff095-77ad-4166-af4c-1b900e3d2c4a/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:49 crc kubenswrapper[4767]: I0128 19:41:49.175134 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_ebd19321-8c26-4b0c-9303-39e4c35b9050/nova-api-log/0.log" Jan 28 19:41:49 crc kubenswrapper[4767]: I0128 19:41:49.505746 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_f2799053-dd71-462c-bb14-ccdee947780e/nova-cell0-conductor-conductor/0.log" Jan 28 19:41:49 crc kubenswrapper[4767]: I0128 19:41:49.509550 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_ebd19321-8c26-4b0c-9303-39e4c35b9050/nova-api-api/0.log" Jan 28 19:41:49 crc kubenswrapper[4767]: I0128 19:41:49.606311 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_21f835c6-b7b5-413c-8f9f-e349583860fc/nova-cell1-conductor-conductor/0.log" Jan 28 19:41:49 crc kubenswrapper[4767]: I0128 19:41:49.821623 4767 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_af20411f-dff0-46d6-9405-7e3f4b28f309/nova-cell1-novncproxy-novncproxy/0.log" Jan 28 19:41:49 crc kubenswrapper[4767]: I0128 19:41:49.900478 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-26dlw_65d614d7-80ab-4e73-a07c-ee9639e65436/nova-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:50 crc kubenswrapper[4767]: I0128 19:41:50.515026 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_532091ea-ba10-45b3-8843-bf1582e4e30e/nova-metadata-log/0.log" Jan 28 19:41:50 crc kubenswrapper[4767]: I0128 19:41:50.631762 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_ee1ce6f4-07db-4542-bdf2-49fc980801eb/nova-scheduler-scheduler/0.log" Jan 28 19:41:51 crc kubenswrapper[4767]: I0128 19:41:51.067592 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_ae717a60-f106-49ec-abaa-3be941f7f907/mysql-bootstrap/0.log" Jan 28 19:41:51 crc kubenswrapper[4767]: I0128 19:41:51.377957 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_ae717a60-f106-49ec-abaa-3be941f7f907/mysql-bootstrap/0.log" Jan 28 19:41:51 crc kubenswrapper[4767]: I0128 19:41:51.428823 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_ae717a60-f106-49ec-abaa-3be941f7f907/galera/0.log" Jan 28 19:41:51 crc kubenswrapper[4767]: I0128 19:41:51.662575 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_837cd3ca-8015-435a-b908-0b46125d68ae/mysql-bootstrap/0.log" Jan 28 19:41:51 crc kubenswrapper[4767]: I0128 19:41:51.860867 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_837cd3ca-8015-435a-b908-0b46125d68ae/mysql-bootstrap/0.log" Jan 28 19:41:51 crc kubenswrapper[4767]: I0128 19:41:51.889306 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_837cd3ca-8015-435a-b908-0b46125d68ae/galera/0.log" Jan 28 19:41:52 crc kubenswrapper[4767]: I0128 19:41:52.102373 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_532091ea-ba10-45b3-8843-bf1582e4e30e/nova-metadata-metadata/0.log" Jan 28 19:41:52 crc kubenswrapper[4767]: I0128 19:41:52.281822 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_50ab1c7e-8b1e-4bbe-9574-15b14f7d0a58/openstackclient/0.log" Jan 28 19:41:52 crc kubenswrapper[4767]: I0128 19:41:52.291625 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-dq7h8_79640773-c4bb-4add-83a8-f9a39873bdef/ovn-controller/0.log" Jan 28 19:41:52 crc kubenswrapper[4767]: I0128 19:41:52.497444 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-h572c_eb5655f4-ca65-4c3f-9ea1-2d81b3c54b73/openstack-network-exporter/0.log" Jan 28 19:41:52 crc kubenswrapper[4767]: I0128 19:41:52.585327 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-8cjhn_ee92efdf-106a-4ca0-8e46-8122b820a0d1/ovsdb-server-init/0.log" Jan 28 19:41:52 crc kubenswrapper[4767]: I0128 19:41:52.787234 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-8cjhn_ee92efdf-106a-4ca0-8e46-8122b820a0d1/ovsdb-server/0.log" Jan 28 19:41:52 crc kubenswrapper[4767]: I0128 19:41:52.799092 4767 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-8cjhn_ee92efdf-106a-4ca0-8e46-8122b820a0d1/ovsdb-server-init/0.log" Jan 28 19:41:52 crc kubenswrapper[4767]: I0128 19:41:52.859968 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-8cjhn_ee92efdf-106a-4ca0-8e46-8122b820a0d1/ovs-vswitchd/0.log" Jan 28 19:41:53 crc kubenswrapper[4767]: I0128 19:41:53.027439 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-rcqwr_5262098e-80da-417a-92db-44c89a52ae2f/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:53 crc kubenswrapper[4767]: I0128 19:41:53.177921 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_bdc30b2c-2b3b-4e16-8a77-965490805677/openstack-network-exporter/0.log" Jan 28 19:41:53 crc kubenswrapper[4767]: I0128 19:41:53.214508 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_bdc30b2c-2b3b-4e16-8a77-965490805677/ovn-northd/0.log" Jan 28 19:41:53 crc kubenswrapper[4767]: I0128 19:41:53.389955 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_8fdcc5ee-189a-4c1e-a652-209015b14ac9/openstack-network-exporter/0.log" Jan 28 19:41:53 crc kubenswrapper[4767]: I0128 19:41:53.407744 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_8fdcc5ee-189a-4c1e-a652-209015b14ac9/ovsdbserver-nb/0.log" Jan 28 19:41:53 crc kubenswrapper[4767]: I0128 19:41:53.837045 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_8f18e95b-3769-49b5-a79a-1afd1fed3147/openstack-network-exporter/0.log" Jan 28 19:41:53 crc kubenswrapper[4767]: I0128 19:41:53.890616 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_8f18e95b-3769-49b5-a79a-1afd1fed3147/ovsdbserver-sb/0.log" Jan 28 19:41:54 crc kubenswrapper[4767]: I0128 19:41:54.087738 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-c9f87f88d-pq4n5_15662321-cd3a-4aa2-9a5c-277fcffc3c79/placement-api/0.log" Jan 28 19:41:54 crc kubenswrapper[4767]: I0128 19:41:54.204220 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-c9f87f88d-pq4n5_15662321-cd3a-4aa2-9a5c-277fcffc3c79/placement-log/0.log" Jan 28 19:41:54 crc kubenswrapper[4767]: I0128 19:41:54.248712 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_57323335-588f-4faa-9dc4-dcaf2aa2b5f0/init-config-reloader/0.log" Jan 28 19:41:54 crc kubenswrapper[4767]: I0128 19:41:54.409230 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_57323335-588f-4faa-9dc4-dcaf2aa2b5f0/init-config-reloader/0.log" Jan 28 19:41:54 crc kubenswrapper[4767]: I0128 19:41:54.432940 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_57323335-588f-4faa-9dc4-dcaf2aa2b5f0/config-reloader/0.log" Jan 28 19:41:54 crc kubenswrapper[4767]: I0128 19:41:54.557966 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_57323335-588f-4faa-9dc4-dcaf2aa2b5f0/thanos-sidecar/0.log" Jan 28 19:41:54 crc kubenswrapper[4767]: I0128 19:41:54.571836 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_prometheus-metric-storage-0_57323335-588f-4faa-9dc4-dcaf2aa2b5f0/prometheus/0.log" Jan 28 19:41:54 crc kubenswrapper[4767]: I0128 
19:41:54.647726 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_0663f989-bbb4-48a1-b4b8-3463a3a397a1/setup-container/0.log" Jan 28 19:41:54 crc kubenswrapper[4767]: I0128 19:41:54.862226 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_0663f989-bbb4-48a1-b4b8-3463a3a397a1/setup-container/0.log" Jan 28 19:41:54 crc kubenswrapper[4767]: I0128 19:41:54.941348 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_0663f989-bbb4-48a1-b4b8-3463a3a397a1/rabbitmq/0.log" Jan 28 19:41:54 crc kubenswrapper[4767]: I0128 19:41:54.960598 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_4f9527b6-d3d4-484f-ac80-df76d1a21311/setup-container/0.log" Jan 28 19:41:55 crc kubenswrapper[4767]: I0128 19:41:55.193240 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_4f9527b6-d3d4-484f-ac80-df76d1a21311/rabbitmq/0.log" Jan 28 19:41:55 crc kubenswrapper[4767]: I0128 19:41:55.264222 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_4f9527b6-d3d4-484f-ac80-df76d1a21311/setup-container/0.log" Jan 28 19:41:55 crc kubenswrapper[4767]: I0128 19:41:55.273703 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-nx87x_b4fbc177-5b38-40bc-9ce3-1f4509ccf3db/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:55 crc kubenswrapper[4767]: I0128 19:41:55.463534 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-fnn46_562ce29b-990d-4805-9a26-26dbd95185ed/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:55 crc kubenswrapper[4767]: I0128 19:41:55.539028 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-jkqlf_e1670ff6-3b80-40cc-99e0-496beccc5afc/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:55 crc kubenswrapper[4767]: I0128 19:41:55.725336 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-48scb_9b7391a9-8171-40e0-927d-73542f246a2e/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:55 crc kubenswrapper[4767]: I0128 19:41:55.832647 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-wqd2v_88a3dbaa-6957-40e2-ad04-32b9a2516a40/ssh-known-hosts-edpm-deployment/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.093634 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-66c6598f9f-9w6r9_85ceb5d8-a7fe-4e66-a20f-6a309942c1fc/proxy-server/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.185840 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-66c6598f9f-9w6r9_85ceb5d8-a7fe-4e66-a20f-6a309942c1fc/proxy-httpd/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.261665 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-p8z4r_0bc86a10-2d77-4909-aea5-23bb07841492/swift-ring-rebalance/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.380229 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/account-auditor/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.499193 
4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/account-replicator/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.503855 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/account-reaper/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.593566 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/account-server/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.609150 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/container-auditor/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.732858 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/container-server/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.770424 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/container-replicator/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.804427 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/container-updater/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.921682 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/object-auditor/0.log" Jan 28 19:41:56 crc kubenswrapper[4767]: I0128 19:41:56.979171 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/object-expirer/0.log" Jan 28 19:41:57 crc kubenswrapper[4767]: I0128 19:41:57.038814 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/object-replicator/0.log" Jan 28 19:41:57 crc kubenswrapper[4767]: I0128 19:41:57.057916 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/object-server/0.log" Jan 28 19:41:57 crc kubenswrapper[4767]: I0128 19:41:57.144226 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/object-updater/0.log" Jan 28 19:41:57 crc kubenswrapper[4767]: I0128 19:41:57.233086 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/rsync/0.log" Jan 28 19:41:57 crc kubenswrapper[4767]: I0128 19:41:57.286503 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_c482494c-49e9-4314-a836-a7bea8f6f8c4/swift-recon-cron/0.log" Jan 28 19:41:57 crc kubenswrapper[4767]: I0128 19:41:57.501709 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-x6scl_91ba9732-3ebf-4a6b-8090-3314d9ece64f/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:41:57 crc kubenswrapper[4767]: I0128 19:41:57.557356 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-rkzdl_e37ab6f3-785e-4437-b1e3-8e5316868389/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Jan 28 19:42:06 crc kubenswrapper[4767]: I0128 19:42:06.788430 
4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_07049746-7fe4-45ba-9a83-201db13c3de0/memcached/0.log" Jan 28 19:42:15 crc kubenswrapper[4767]: I0128 19:42:15.455987 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:42:15 crc kubenswrapper[4767]: I0128 19:42:15.456516 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:42:27 crc kubenswrapper[4767]: I0128 19:42:27.356911 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc_661846e5-b7a9-457e-99fe-86e94f07dda7/util/0.log" Jan 28 19:42:27 crc kubenswrapper[4767]: I0128 19:42:27.575024 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc_661846e5-b7a9-457e-99fe-86e94f07dda7/pull/0.log" Jan 28 19:42:27 crc kubenswrapper[4767]: I0128 19:42:27.595895 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc_661846e5-b7a9-457e-99fe-86e94f07dda7/util/0.log" Jan 28 19:42:27 crc kubenswrapper[4767]: I0128 19:42:27.634775 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc_661846e5-b7a9-457e-99fe-86e94f07dda7/pull/0.log" Jan 28 19:42:27 crc kubenswrapper[4767]: I0128 19:42:27.808123 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc_661846e5-b7a9-457e-99fe-86e94f07dda7/util/0.log" Jan 28 19:42:27 crc kubenswrapper[4767]: I0128 19:42:27.822105 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc_661846e5-b7a9-457e-99fe-86e94f07dda7/pull/0.log" Jan 28 19:42:27 crc kubenswrapper[4767]: I0128 19:42:27.837752 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_a5f4230ab620e01b235f3e7738ace8414891014872f880a27ae907669b6nbdc_661846e5-b7a9-457e-99fe-86e94f07dda7/extract/0.log" Jan 28 19:42:28 crc kubenswrapper[4767]: I0128 19:42:28.101516 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6bc7f4f4cf-xctgp_ccee4b83-1a09-4828-b370-5fb768476acc/manager/0.log" Jan 28 19:42:28 crc kubenswrapper[4767]: I0128 19:42:28.145375 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-f6487bd57-9hvzg_3355ea8c-1093-449e-9a8d-a4598f46242c/manager/0.log" Jan 28 19:42:28 crc kubenswrapper[4767]: I0128 19:42:28.286578 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-66dfbd6f5d-x8rlq_4e17b611-fe05-4f69-b64e-b1abb213b297/manager/0.log" Jan 28 19:42:28 crc kubenswrapper[4767]: I0128 19:42:28.425584 4767 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-6db5dbd896-kb8qp_e2672b52-b1cf-491e-8f76-46c22b19fbbf/manager/0.log" Jan 28 19:42:28 crc kubenswrapper[4767]: I0128 19:42:28.544081 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-587c6bfdcf-bhkrp_3bbfc3c2-f654-4728-8bc0-da11b96d4246/manager/0.log" Jan 28 19:42:28 crc kubenswrapper[4767]: I0128 19:42:28.653830 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5fb775575f-qcsg7_4129a635-943e-4417-8934-24c408083149/manager/0.log" Jan 28 19:42:28 crc kubenswrapper[4767]: I0128 19:42:28.908336 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-958664b5-4prpg_e3793181-a73c-494b-ba12-a1a908a7d6f5/manager/0.log" Jan 28 19:42:29 crc kubenswrapper[4767]: I0128 19:42:29.094903 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-79955696d6-gkt4m_c44704ef-ddff-4dc9-8e03-ea5bdd14dd8d/manager/0.log" Jan 28 19:42:29 crc kubenswrapper[4767]: I0128 19:42:29.131951 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-6978b79747-l4575_898fb09e-f084-45e3-88bd-7a67ef198bee/manager/0.log" Jan 28 19:42:29 crc kubenswrapper[4767]: I0128 19:42:29.169790 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-765668569f-98mkp_b48deaef-0712-425b-8d49-133c4931ea06/manager/0.log" Jan 28 19:42:29 crc kubenswrapper[4767]: I0128 19:42:29.900179 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-67bf948998-222q9_14052357-c381-4e6b-ad51-13179cd09877/manager/0.log" Jan 28 19:42:29 crc kubenswrapper[4767]: I0128 19:42:29.908873 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-694c5bfc85-vsdzj_58b8adeb-7f68-48a4-a8b3-5e93b6b93ec8/manager/0.log" Jan 28 19:42:30 crc kubenswrapper[4767]: I0128 19:42:30.129197 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-5c765b4558-v7nnq_fbe04280-2c8a-4f54-9442-26fb1f381358/manager/0.log" Jan 28 19:42:30 crc kubenswrapper[4767]: I0128 19:42:30.164064 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-ddcbfd695-mrhz5_a0962425-fb2a-4acc-966b-544669cd2dc6/manager/0.log" Jan 28 19:42:30 crc kubenswrapper[4767]: I0128 19:42:30.340583 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-59c4b45c4dwd2vd_ac7dbf1c-e4ce-4b04-8723-6166810cdf9b/manager/0.log" Jan 28 19:42:30 crc kubenswrapper[4767]: I0128 19:42:30.464980 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-84ff885fbb-jgrkk_39734b99-2733-4aef-b88e-12a87340933c/operator/0.log" Jan 28 19:42:30 crc kubenswrapper[4767]: I0128 19:42:30.702182 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-d78gr_5c19930e-137e-402d-90f2-a25b9ff8117c/registry-server/0.log" Jan 28 19:42:31 crc kubenswrapper[4767]: I0128 19:42:31.025497 4767 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-788c46999f-hq5gx_56088743-c7ed-4882-b2e5-0845caba050e/manager/0.log" Jan 28 19:42:31 crc kubenswrapper[4767]: I0128 19:42:31.132171 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5b964cf4cd-b52rs_a5d0d7c1-8591-4619-912c-8db740ebd050/manager/0.log" Jan 28 19:42:31 crc kubenswrapper[4767]: I0128 19:42:31.868848 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-xjfz2_83eb3c39-bbf1-4059-ae27-c3a8aac5ad69/operator/0.log" Jan 28 19:42:31 crc kubenswrapper[4767]: I0128 19:42:31.987581 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-68fc8c869-w4dzg_77bf9c1d-826f-418d-8a94-80e4d46cc051/manager/0.log" Jan 28 19:42:32 crc kubenswrapper[4767]: I0128 19:42:32.052260 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-77dc76679b-k6848_964b21b6-27a7-4fa9-9f44-ddb1484e7266/manager/0.log" Jan 28 19:42:32 crc kubenswrapper[4767]: I0128 19:42:32.294407 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-56f8bfcd9f-tkgjs_b60452a4-90a3-492d-be1d-f481ed0fce75/manager/0.log" Jan 28 19:42:32 crc kubenswrapper[4767]: I0128 19:42:32.296311 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-79f6f75b9c-dhf5c_0d7c50d3-1348-43e5-a8fa-f05cd53d2a42/manager/0.log" Jan 28 19:42:32 crc kubenswrapper[4767]: I0128 19:42:32.370317 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-767b8bc766-wvg8s_17bdefa0-fed1-4dd5-abde-10d52eebbdb8/manager/0.log" Jan 28 19:42:45 crc kubenswrapper[4767]: I0128 19:42:45.456101 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:42:45 crc kubenswrapper[4767]: I0128 19:42:45.456673 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 28 19:42:45 crc kubenswrapper[4767]: I0128 19:42:45.456719 4767 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" Jan 28 19:42:45 crc kubenswrapper[4767]: I0128 19:42:45.457558 4767 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3"} pod="openshift-machine-config-operator/machine-config-daemon-skvzp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 28 19:42:45 crc kubenswrapper[4767]: I0128 19:42:45.457615 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" 
containerName="machine-config-daemon" containerID="cri-o://8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" gracePeriod=600 Jan 28 19:42:45 crc kubenswrapper[4767]: E0128 19:42:45.579291 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:42:46 crc kubenswrapper[4767]: I0128 19:42:46.360425 4767 generic.go:334] "Generic (PLEG): container finished" podID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" exitCode=0 Jan 28 19:42:46 crc kubenswrapper[4767]: I0128 19:42:46.360479 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerDied","Data":"8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3"} Jan 28 19:42:46 crc kubenswrapper[4767]: I0128 19:42:46.360515 4767 scope.go:117] "RemoveContainer" containerID="7863ea4af4437efd3146e9c7a8845285fbe39a3db1b834dd9d43a44112bed4a1" Jan 28 19:42:46 crc kubenswrapper[4767]: I0128 19:42:46.361013 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:42:46 crc kubenswrapper[4767]: E0128 19:42:46.361263 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:42:51 crc kubenswrapper[4767]: I0128 19:42:51.921875 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-ctkl7_bef36296-0ea5-4316-a31a-c14346fc1597/control-plane-machine-set-operator/0.log" Jan 28 19:42:52 crc kubenswrapper[4767]: I0128 19:42:52.075837 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-vqr6q_24a3b544-3082-45d9-956f-1540b9725ea2/kube-rbac-proxy/0.log" Jan 28 19:42:52 crc kubenswrapper[4767]: I0128 19:42:52.096539 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-vqr6q_24a3b544-3082-45d9-956f-1540b9725ea2/machine-api-operator/0.log" Jan 28 19:42:56 crc kubenswrapper[4767]: I0128 19:42:56.796271 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:42:56 crc kubenswrapper[4767]: E0128 19:42:56.796979 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:43:04 crc 
kubenswrapper[4767]: I0128 19:43:04.585024 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-njmvq_52e7e92c-6b90-43b0-afaa-d90522a999e9/cert-manager-controller/0.log" Jan 28 19:43:04 crc kubenswrapper[4767]: I0128 19:43:04.763264 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-qrhz6_d472b3ea-2a84-47e5-b9ee-56ddb86cfadf/cert-manager-cainjector/0.log" Jan 28 19:43:04 crc kubenswrapper[4767]: I0128 19:43:04.790491 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-twwkv_d775633b-82b2-45f0-bf58-f3d45ee298a7/cert-manager-webhook/0.log" Jan 28 19:43:08 crc kubenswrapper[4767]: I0128 19:43:08.795125 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:43:08 crc kubenswrapper[4767]: E0128 19:43:08.795954 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:43:17 crc kubenswrapper[4767]: I0128 19:43:17.873358 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-465vd_eb6b00a4-771d-49c9-8220-6dcbf9e4a742/nmstate-console-plugin/0.log" Jan 28 19:43:18 crc kubenswrapper[4767]: I0128 19:43:18.027899 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-qhr2g_bef9ca58-f283-48a1-b354-d2c3f061ced9/nmstate-handler/0.log" Jan 28 19:43:18 crc kubenswrapper[4767]: I0128 19:43:18.066852 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-n4gj6_ed568f05-84f3-43fb-b4eb-2adaef551020/kube-rbac-proxy/0.log" Jan 28 19:43:18 crc kubenswrapper[4767]: I0128 19:43:18.111914 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-n4gj6_ed568f05-84f3-43fb-b4eb-2adaef551020/nmstate-metrics/0.log" Jan 28 19:43:18 crc kubenswrapper[4767]: I0128 19:43:18.254158 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-lkld9_a18398dd-746f-4915-8989-211b52555416/nmstate-operator/0.log" Jan 28 19:43:18 crc kubenswrapper[4767]: I0128 19:43:18.338833 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-mdhlv_6f3dc9ef-68cf-4148-a915-fd3d30177771/nmstate-webhook/0.log" Jan 28 19:43:20 crc kubenswrapper[4767]: I0128 19:43:20.795880 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:43:20 crc kubenswrapper[4767]: E0128 19:43:20.796727 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:43:31 crc kubenswrapper[4767]: I0128 19:43:31.806063 4767 
scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:43:31 crc kubenswrapper[4767]: E0128 19:43:31.809194 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:43:33 crc kubenswrapper[4767]: I0128 19:43:33.157329 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-jcpks_471116e2-94a5-4d2f-bac3-0c312652ae8c/prometheus-operator/0.log" Jan 28 19:43:33 crc kubenswrapper[4767]: I0128 19:43:33.350820 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl_8f08ca15-124c-476c-b9e1-1002aa7edfd7/prometheus-operator-admission-webhook/0.log" Jan 28 19:43:33 crc kubenswrapper[4767]: I0128 19:43:33.430353 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2_c2ee930f-1338-483a-aa28-eaecde4404cb/prometheus-operator-admission-webhook/0.log" Jan 28 19:43:33 crc kubenswrapper[4767]: I0128 19:43:33.610938 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-2kxrn_e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf/operator/0.log" Jan 28 19:43:33 crc kubenswrapper[4767]: I0128 19:43:33.695371 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-pkkhl_3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97/perses-operator/0.log" Jan 28 19:43:43 crc kubenswrapper[4767]: I0128 19:43:43.795526 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:43:43 crc kubenswrapper[4767]: E0128 19:43:43.796281 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:43:49 crc kubenswrapper[4767]: I0128 19:43:49.475436 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-lzbd2_b2e291f1-6f8b-46e8-bc66-7bc0896aef3c/kube-rbac-proxy/0.log" Jan 28 19:43:49 crc kubenswrapper[4767]: I0128 19:43:49.617526 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-lzbd2_b2e291f1-6f8b-46e8-bc66-7bc0896aef3c/controller/0.log" Jan 28 19:43:49 crc kubenswrapper[4767]: I0128 19:43:49.748340 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/cp-frr-files/0.log" Jan 28 19:43:49 crc kubenswrapper[4767]: I0128 19:43:49.939166 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/cp-frr-files/0.log" Jan 28 19:43:49 crc kubenswrapper[4767]: I0128 19:43:49.951884 4767 log.go:25] "Finished 
parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/cp-metrics/0.log" Jan 28 19:43:49 crc kubenswrapper[4767]: I0128 19:43:49.983401 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/cp-reloader/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.000536 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/cp-reloader/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.215953 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/cp-reloader/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.219335 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/cp-frr-files/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.241610 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/cp-metrics/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.286023 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/cp-metrics/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.531400 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/cp-frr-files/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.532909 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/cp-metrics/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.558160 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/controller/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.583194 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/cp-reloader/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.750778 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/frr-metrics/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.791790 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/kube-rbac-proxy/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.845426 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/kube-rbac-proxy-frr/0.log" Jan 28 19:43:50 crc kubenswrapper[4767]: I0128 19:43:50.943426 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/reloader/0.log" Jan 28 19:43:51 crc kubenswrapper[4767]: I0128 19:43:51.106794 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-584q6_db113ec8-a92c-4ddb-abc6-d49a3fb842f3/frr-k8s-webhook-server/0.log" Jan 28 19:43:51 crc kubenswrapper[4767]: I0128 19:43:51.484083 4767 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_metallb-operator-webhook-server-6584c89557-hp6s4_a4b668f3-0c0f-43bb-8d16-c460c756226c/webhook-server/0.log" Jan 28 19:43:51 crc kubenswrapper[4767]: I0128 19:43:51.534949 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6f4744495b-dgmjt_2b81ef34-3602-4a95-adaf-e2168b7c2827/manager/0.log" Jan 28 19:43:51 crc kubenswrapper[4767]: I0128 19:43:51.918817 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-lp8ks_92854eca-77c5-4859-bfdb-21b7b6c96c98/kube-rbac-proxy/0.log" Jan 28 19:43:52 crc kubenswrapper[4767]: I0128 19:43:52.593940 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-lp8ks_92854eca-77c5-4859-bfdb-21b7b6c96c98/speaker/0.log" Jan 28 19:43:52 crc kubenswrapper[4767]: I0128 19:43:52.982457 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-ngm5b_e21010b1-73fb-4d7f-981a-a64a10495e7d/frr/0.log" Jan 28 19:43:54 crc kubenswrapper[4767]: I0128 19:43:54.825687 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:43:54 crc kubenswrapper[4767]: E0128 19:43:54.826328 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.312663 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-fpbqk"] Jan 28 19:44:02 crc kubenswrapper[4767]: E0128 19:44:02.313848 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d61c67f5-a051-422e-adb7-8f6769e3bae5" containerName="extract-utilities" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.313866 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="d61c67f5-a051-422e-adb7-8f6769e3bae5" containerName="extract-utilities" Jan 28 19:44:02 crc kubenswrapper[4767]: E0128 19:44:02.313878 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d61c67f5-a051-422e-adb7-8f6769e3bae5" containerName="registry-server" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.313884 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="d61c67f5-a051-422e-adb7-8f6769e3bae5" containerName="registry-server" Jan 28 19:44:02 crc kubenswrapper[4767]: E0128 19:44:02.313903 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d61c67f5-a051-422e-adb7-8f6769e3bae5" containerName="extract-content" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.313908 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="d61c67f5-a051-422e-adb7-8f6769e3bae5" containerName="extract-content" Jan 28 19:44:02 crc kubenswrapper[4767]: E0128 19:44:02.313921 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="730da748-48cb-4d3e-9b3a-d5a3556f8254" containerName="container-00" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.313927 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="730da748-48cb-4d3e-9b3a-d5a3556f8254" containerName="container-00" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.314107 4767 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="d61c67f5-a051-422e-adb7-8f6769e3bae5" containerName="registry-server" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.314119 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="730da748-48cb-4d3e-9b3a-d5a3556f8254" containerName="container-00" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.315493 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.333773 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpbqk"] Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.390833 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-utilities\") pod \"redhat-marketplace-fpbqk\" (UID: \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\") " pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.391395 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v47zp\" (UniqueName: \"kubernetes.io/projected/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-kube-api-access-v47zp\") pod \"redhat-marketplace-fpbqk\" (UID: \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\") " pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.391537 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-catalog-content\") pod \"redhat-marketplace-fpbqk\" (UID: \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\") " pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.494117 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v47zp\" (UniqueName: \"kubernetes.io/projected/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-kube-api-access-v47zp\") pod \"redhat-marketplace-fpbqk\" (UID: \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\") " pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.494248 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-catalog-content\") pod \"redhat-marketplace-fpbqk\" (UID: \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\") " pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.494295 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-utilities\") pod \"redhat-marketplace-fpbqk\" (UID: \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\") " pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.494791 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-catalog-content\") pod \"redhat-marketplace-fpbqk\" (UID: \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\") " pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.494818 4767 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-utilities\") pod \"redhat-marketplace-fpbqk\" (UID: \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\") " pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.517146 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v47zp\" (UniqueName: \"kubernetes.io/projected/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-kube-api-access-v47zp\") pod \"redhat-marketplace-fpbqk\" (UID: \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\") " pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:02 crc kubenswrapper[4767]: I0128 19:44:02.634770 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:03 crc kubenswrapper[4767]: I0128 19:44:03.179567 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpbqk"] Jan 28 19:44:04 crc kubenswrapper[4767]: I0128 19:44:04.167927 4767 generic.go:334] "Generic (PLEG): container finished" podID="cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" containerID="aa44a4c2b7b8e996ff23a187101303b89197a24dbc2df060093392690e06a90f" exitCode=0 Jan 28 19:44:04 crc kubenswrapper[4767]: I0128 19:44:04.168016 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpbqk" event={"ID":"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2","Type":"ContainerDied","Data":"aa44a4c2b7b8e996ff23a187101303b89197a24dbc2df060093392690e06a90f"} Jan 28 19:44:04 crc kubenswrapper[4767]: I0128 19:44:04.168460 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpbqk" event={"ID":"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2","Type":"ContainerStarted","Data":"8091ef9f0fb8e93f80ebfe23157a670109e9475d23e7a50bed1160f7f4b1513c"} Jan 28 19:44:06 crc kubenswrapper[4767]: I0128 19:44:06.188882 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpbqk" event={"ID":"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2","Type":"ContainerStarted","Data":"14b31cb155d018d1f2a55ef0c16b58f7ecbf4ab55356d1872cfcefbe3817918a"} Jan 28 19:44:07 crc kubenswrapper[4767]: I0128 19:44:07.198229 4767 generic.go:334] "Generic (PLEG): container finished" podID="cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" containerID="14b31cb155d018d1f2a55ef0c16b58f7ecbf4ab55356d1872cfcefbe3817918a" exitCode=0 Jan 28 19:44:07 crc kubenswrapper[4767]: I0128 19:44:07.198424 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpbqk" event={"ID":"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2","Type":"ContainerDied","Data":"14b31cb155d018d1f2a55ef0c16b58f7ecbf4ab55356d1872cfcefbe3817918a"} Jan 28 19:44:08 crc kubenswrapper[4767]: I0128 19:44:08.210648 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpbqk" event={"ID":"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2","Type":"ContainerStarted","Data":"b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789"} Jan 28 19:44:08 crc kubenswrapper[4767]: I0128 19:44:08.446394 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28_6eecf577-4963-45f9-a5fa-96bfae201c3c/util/0.log" Jan 28 19:44:08 crc kubenswrapper[4767]: I0128 19:44:08.754630 4767 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28_6eecf577-4963-45f9-a5fa-96bfae201c3c/pull/0.log" Jan 28 19:44:08 crc kubenswrapper[4767]: I0128 19:44:08.810318 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28_6eecf577-4963-45f9-a5fa-96bfae201c3c/util/0.log" Jan 28 19:44:08 crc kubenswrapper[4767]: I0128 19:44:08.823269 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28_6eecf577-4963-45f9-a5fa-96bfae201c3c/pull/0.log" Jan 28 19:44:08 crc kubenswrapper[4767]: I0128 19:44:08.978871 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28_6eecf577-4963-45f9-a5fa-96bfae201c3c/util/0.log" Jan 28 19:44:08 crc kubenswrapper[4767]: I0128 19:44:08.979652 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28_6eecf577-4963-45f9-a5fa-96bfae201c3c/pull/0.log" Jan 28 19:44:09 crc kubenswrapper[4767]: I0128 19:44:09.073729 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcg5b28_6eecf577-4963-45f9-a5fa-96bfae201c3c/extract/0.log" Jan 28 19:44:09 crc kubenswrapper[4767]: I0128 19:44:09.207837 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh_cedc3704-963c-4161-b9c9-cf2b6d8ea555/util/0.log" Jan 28 19:44:09 crc kubenswrapper[4767]: I0128 19:44:09.410969 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh_cedc3704-963c-4161-b9c9-cf2b6d8ea555/util/0.log" Jan 28 19:44:09 crc kubenswrapper[4767]: I0128 19:44:09.457511 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh_cedc3704-963c-4161-b9c9-cf2b6d8ea555/pull/0.log" Jan 28 19:44:09 crc kubenswrapper[4767]: I0128 19:44:09.702568 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh_cedc3704-963c-4161-b9c9-cf2b6d8ea555/pull/0.log" Jan 28 19:44:09 crc kubenswrapper[4767]: I0128 19:44:09.796452 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:44:09 crc kubenswrapper[4767]: E0128 19:44:09.796712 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:44:09 crc kubenswrapper[4767]: I0128 19:44:09.820959 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh_cedc3704-963c-4161-b9c9-cf2b6d8ea555/util/0.log" Jan 28 19:44:09 crc kubenswrapper[4767]: I0128 19:44:09.842258 4767 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh_cedc3704-963c-4161-b9c9-cf2b6d8ea555/pull/0.log" Jan 28 19:44:09 crc kubenswrapper[4767]: I0128 19:44:09.898484 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec71364vrh_cedc3704-963c-4161-b9c9-cf2b6d8ea555/extract/0.log" Jan 28 19:44:10 crc kubenswrapper[4767]: I0128 19:44:10.002763 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j_e8dad657-0de1-4111-912b-f07ad63a264f/util/0.log" Jan 28 19:44:10 crc kubenswrapper[4767]: I0128 19:44:10.188590 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j_e8dad657-0de1-4111-912b-f07ad63a264f/util/0.log" Jan 28 19:44:10 crc kubenswrapper[4767]: I0128 19:44:10.215887 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j_e8dad657-0de1-4111-912b-f07ad63a264f/pull/0.log" Jan 28 19:44:10 crc kubenswrapper[4767]: I0128 19:44:10.234707 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j_e8dad657-0de1-4111-912b-f07ad63a264f/pull/0.log" Jan 28 19:44:10 crc kubenswrapper[4767]: I0128 19:44:10.434512 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j_e8dad657-0de1-4111-912b-f07ad63a264f/util/0.log" Jan 28 19:44:10 crc kubenswrapper[4767]: I0128 19:44:10.445822 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j_e8dad657-0de1-4111-912b-f07ad63a264f/pull/0.log" Jan 28 19:44:10 crc kubenswrapper[4767]: I0128 19:44:10.499379 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_98629960b44b381d1a86cff1d1439a8df43509c9ad24579158c59d0f08xcj8j_e8dad657-0de1-4111-912b-f07ad63a264f/extract/0.log" Jan 28 19:44:10 crc kubenswrapper[4767]: I0128 19:44:10.600409 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qf5wb_0f72d947-439a-4097-bdd3-f695dc72ae90/extract-utilities/0.log" Jan 28 19:44:10 crc kubenswrapper[4767]: I0128 19:44:10.802948 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qf5wb_0f72d947-439a-4097-bdd3-f695dc72ae90/extract-utilities/0.log" Jan 28 19:44:10 crc kubenswrapper[4767]: I0128 19:44:10.849535 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qf5wb_0f72d947-439a-4097-bdd3-f695dc72ae90/extract-content/0.log" Jan 28 19:44:10 crc kubenswrapper[4767]: I0128 19:44:10.881044 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qf5wb_0f72d947-439a-4097-bdd3-f695dc72ae90/extract-content/0.log" Jan 28 19:44:11 crc kubenswrapper[4767]: I0128 19:44:11.022600 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qf5wb_0f72d947-439a-4097-bdd3-f695dc72ae90/extract-utilities/0.log" Jan 28 19:44:11 crc kubenswrapper[4767]: I0128 19:44:11.029866 4767 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-qf5wb_0f72d947-439a-4097-bdd3-f695dc72ae90/extract-content/0.log" Jan 28 19:44:11 crc kubenswrapper[4767]: I0128 19:44:11.273252 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-8mdrf_7644cd39-83ef-4613-ac2e-774f7d8efd0c/extract-utilities/0.log" Jan 28 19:44:11 crc kubenswrapper[4767]: I0128 19:44:11.558868 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-8mdrf_7644cd39-83ef-4613-ac2e-774f7d8efd0c/extract-content/0.log" Jan 28 19:44:11 crc kubenswrapper[4767]: I0128 19:44:11.591593 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-8mdrf_7644cd39-83ef-4613-ac2e-774f7d8efd0c/extract-content/0.log" Jan 28 19:44:11 crc kubenswrapper[4767]: I0128 19:44:11.772637 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-qf5wb_0f72d947-439a-4097-bdd3-f695dc72ae90/registry-server/0.log" Jan 28 19:44:11 crc kubenswrapper[4767]: I0128 19:44:11.775370 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-8mdrf_7644cd39-83ef-4613-ac2e-774f7d8efd0c/extract-utilities/0.log" Jan 28 19:44:11 crc kubenswrapper[4767]: I0128 19:44:11.901564 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-8mdrf_7644cd39-83ef-4613-ac2e-774f7d8efd0c/extract-utilities/0.log" Jan 28 19:44:11 crc kubenswrapper[4767]: I0128 19:44:11.973084 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-8mdrf_7644cd39-83ef-4613-ac2e-774f7d8efd0c/extract-content/0.log" Jan 28 19:44:12 crc kubenswrapper[4767]: I0128 19:44:12.195360 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-6nztk_bead226a-cfb9-45a4-b4ec-0c910a29c78a/marketplace-operator/0.log" Jan 28 19:44:12 crc kubenswrapper[4767]: I0128 19:44:12.408275 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fpbqk_cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2/extract-utilities/0.log" Jan 28 19:44:12 crc kubenswrapper[4767]: I0128 19:44:12.610883 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fpbqk_cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2/extract-utilities/0.log" Jan 28 19:44:12 crc kubenswrapper[4767]: I0128 19:44:12.634984 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:12 crc kubenswrapper[4767]: I0128 19:44:12.635027 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:12 crc kubenswrapper[4767]: I0128 19:44:12.687994 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fpbqk_cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2/extract-content/0.log" Jan 28 19:44:12 crc kubenswrapper[4767]: I0128 19:44:12.690386 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:12 crc kubenswrapper[4767]: I0128 19:44:12.712646 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-8mdrf_7644cd39-83ef-4613-ac2e-774f7d8efd0c/registry-server/0.log" Jan 28 19:44:12 
crc kubenswrapper[4767]: I0128 19:44:12.721416 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-fpbqk" podStartSLOduration=7.251740509 podStartE2EDuration="10.721390959s" podCreationTimestamp="2026-01-28 19:44:02 +0000 UTC" firstStartedPulling="2026-01-28 19:44:04.171619958 +0000 UTC m=+4450.135802832" lastFinishedPulling="2026-01-28 19:44:07.641270408 +0000 UTC m=+4453.605453282" observedRunningTime="2026-01-28 19:44:08.233843626 +0000 UTC m=+4454.198026500" watchObservedRunningTime="2026-01-28 19:44:12.721390959 +0000 UTC m=+4458.685573843" Jan 28 19:44:12 crc kubenswrapper[4767]: I0128 19:44:12.742414 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fpbqk_cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2/extract-content/0.log" Jan 28 19:44:12 crc kubenswrapper[4767]: I0128 19:44:12.861221 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fpbqk_cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2/extract-utilities/0.log" Jan 28 19:44:12 crc kubenswrapper[4767]: I0128 19:44:12.890498 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fpbqk_cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2/extract-content/0.log" Jan 28 19:44:12 crc kubenswrapper[4767]: I0128 19:44:12.951854 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-fpbqk_cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2/registry-server/0.log" Jan 28 19:44:13 crc kubenswrapper[4767]: I0128 19:44:13.025789 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kwvlm_d53c5b1a-7ab8-45e2-8a14-4698bb28b94e/extract-utilities/0.log" Jan 28 19:44:13 crc kubenswrapper[4767]: I0128 19:44:13.196311 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kwvlm_d53c5b1a-7ab8-45e2-8a14-4698bb28b94e/extract-utilities/0.log" Jan 28 19:44:13 crc kubenswrapper[4767]: I0128 19:44:13.200795 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kwvlm_d53c5b1a-7ab8-45e2-8a14-4698bb28b94e/extract-content/0.log" Jan 28 19:44:13 crc kubenswrapper[4767]: I0128 19:44:13.274355 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kwvlm_d53c5b1a-7ab8-45e2-8a14-4698bb28b94e/extract-content/0.log" Jan 28 19:44:13 crc kubenswrapper[4767]: I0128 19:44:13.886905 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:13 crc kubenswrapper[4767]: I0128 19:44:13.950017 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpbqk"] Jan 28 19:44:13 crc kubenswrapper[4767]: I0128 19:44:13.971876 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kwvlm_d53c5b1a-7ab8-45e2-8a14-4698bb28b94e/extract-content/0.log" Jan 28 19:44:13 crc kubenswrapper[4767]: I0128 19:44:13.972580 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kwvlm_d53c5b1a-7ab8-45e2-8a14-4698bb28b94e/extract-utilities/0.log" Jan 28 19:44:14 crc kubenswrapper[4767]: I0128 19:44:14.050133 4767 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-operators-jfj5m_d0824466-227b-42c2-995d-6ab5dde3f5c2/extract-utilities/0.log" Jan 28 19:44:14 crc kubenswrapper[4767]: I0128 19:44:14.215365 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-kwvlm_d53c5b1a-7ab8-45e2-8a14-4698bb28b94e/registry-server/0.log" Jan 28 19:44:14 crc kubenswrapper[4767]: I0128 19:44:14.234719 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jfj5m_d0824466-227b-42c2-995d-6ab5dde3f5c2/extract-content/0.log" Jan 28 19:44:14 crc kubenswrapper[4767]: I0128 19:44:14.252773 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jfj5m_d0824466-227b-42c2-995d-6ab5dde3f5c2/extract-utilities/0.log" Jan 28 19:44:14 crc kubenswrapper[4767]: I0128 19:44:14.285793 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jfj5m_d0824466-227b-42c2-995d-6ab5dde3f5c2/extract-content/0.log" Jan 28 19:44:14 crc kubenswrapper[4767]: I0128 19:44:14.505985 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jfj5m_d0824466-227b-42c2-995d-6ab5dde3f5c2/extract-utilities/0.log" Jan 28 19:44:14 crc kubenswrapper[4767]: I0128 19:44:14.506694 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jfj5m_d0824466-227b-42c2-995d-6ab5dde3f5c2/extract-content/0.log" Jan 28 19:44:15 crc kubenswrapper[4767]: I0128 19:44:15.027606 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-jfj5m_d0824466-227b-42c2-995d-6ab5dde3f5c2/registry-server/0.log" Jan 28 19:44:15 crc kubenswrapper[4767]: I0128 19:44:15.282364 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-fpbqk" podUID="cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" containerName="registry-server" containerID="cri-o://b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789" gracePeriod=2 Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.112976 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.174054 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-catalog-content\") pod \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\" (UID: \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\") " Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.174134 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47zp\" (UniqueName: \"kubernetes.io/projected/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-kube-api-access-v47zp\") pod \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\" (UID: \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\") " Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.180869 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-kube-api-access-v47zp" (OuterVolumeSpecName: "kube-api-access-v47zp") pod "cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" (UID: "cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2"). InnerVolumeSpecName "kube-api-access-v47zp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.216870 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" (UID: "cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.276177 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-utilities\") pod \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\" (UID: \"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2\") " Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.276921 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.276946 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47zp\" (UniqueName: \"kubernetes.io/projected/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-kube-api-access-v47zp\") on node \"crc\" DevicePath \"\"" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.277197 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-utilities" (OuterVolumeSpecName: "utilities") pod "cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" (UID: "cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.292847 4767 generic.go:334] "Generic (PLEG): container finished" podID="cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" containerID="b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789" exitCode=0 Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.292906 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpbqk" event={"ID":"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2","Type":"ContainerDied","Data":"b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789"} Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.292934 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-fpbqk" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.293141 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-fpbqk" event={"ID":"cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2","Type":"ContainerDied","Data":"8091ef9f0fb8e93f80ebfe23157a670109e9475d23e7a50bed1160f7f4b1513c"} Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.293305 4767 scope.go:117] "RemoveContainer" containerID="b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.328331 4767 scope.go:117] "RemoveContainer" containerID="14b31cb155d018d1f2a55ef0c16b58f7ecbf4ab55356d1872cfcefbe3817918a" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.332568 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpbqk"] Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.342046 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-fpbqk"] Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.359407 4767 scope.go:117] "RemoveContainer" containerID="aa44a4c2b7b8e996ff23a187101303b89197a24dbc2df060093392690e06a90f" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.377734 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.401394 4767 scope.go:117] "RemoveContainer" containerID="b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789" Jan 28 19:44:16 crc kubenswrapper[4767]: E0128 19:44:16.402506 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789\": container with ID starting with b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789 not found: ID does not exist" containerID="b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.402546 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789"} err="failed to get container status \"b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789\": rpc error: code = NotFound desc = could not find container \"b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789\": container with ID starting with b436f9448ff1b8152ed0550f2951ebe91fba5be9736b75624e79f5a8387c5789 not found: ID does not exist" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.402568 4767 scope.go:117] "RemoveContainer" containerID="14b31cb155d018d1f2a55ef0c16b58f7ecbf4ab55356d1872cfcefbe3817918a" Jan 28 19:44:16 crc kubenswrapper[4767]: E0128 19:44:16.402972 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14b31cb155d018d1f2a55ef0c16b58f7ecbf4ab55356d1872cfcefbe3817918a\": container with ID starting with 14b31cb155d018d1f2a55ef0c16b58f7ecbf4ab55356d1872cfcefbe3817918a not found: ID does not exist" containerID="14b31cb155d018d1f2a55ef0c16b58f7ecbf4ab55356d1872cfcefbe3817918a" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.403017 4767 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"14b31cb155d018d1f2a55ef0c16b58f7ecbf4ab55356d1872cfcefbe3817918a"} err="failed to get container status \"14b31cb155d018d1f2a55ef0c16b58f7ecbf4ab55356d1872cfcefbe3817918a\": rpc error: code = NotFound desc = could not find container \"14b31cb155d018d1f2a55ef0c16b58f7ecbf4ab55356d1872cfcefbe3817918a\": container with ID starting with 14b31cb155d018d1f2a55ef0c16b58f7ecbf4ab55356d1872cfcefbe3817918a not found: ID does not exist" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.403059 4767 scope.go:117] "RemoveContainer" containerID="aa44a4c2b7b8e996ff23a187101303b89197a24dbc2df060093392690e06a90f" Jan 28 19:44:16 crc kubenswrapper[4767]: E0128 19:44:16.403561 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aa44a4c2b7b8e996ff23a187101303b89197a24dbc2df060093392690e06a90f\": container with ID starting with aa44a4c2b7b8e996ff23a187101303b89197a24dbc2df060093392690e06a90f not found: ID does not exist" containerID="aa44a4c2b7b8e996ff23a187101303b89197a24dbc2df060093392690e06a90f" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.403597 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aa44a4c2b7b8e996ff23a187101303b89197a24dbc2df060093392690e06a90f"} err="failed to get container status \"aa44a4c2b7b8e996ff23a187101303b89197a24dbc2df060093392690e06a90f\": rpc error: code = NotFound desc = could not find container \"aa44a4c2b7b8e996ff23a187101303b89197a24dbc2df060093392690e06a90f\": container with ID starting with aa44a4c2b7b8e996ff23a187101303b89197a24dbc2df060093392690e06a90f not found: ID does not exist" Jan 28 19:44:16 crc kubenswrapper[4767]: I0128 19:44:16.809724 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" path="/var/lib/kubelet/pods/cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2/volumes" Jan 28 19:44:23 crc kubenswrapper[4767]: I0128 19:44:23.795648 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:44:23 crc kubenswrapper[4767]: E0128 19:44:23.796473 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:44:29 crc kubenswrapper[4767]: I0128 19:44:29.350121 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-68bc856cb9-jcpks_471116e2-94a5-4d2f-bac3-0c312652ae8c/prometheus-operator/0.log" Jan 28 19:44:29 crc kubenswrapper[4767]: I0128 19:44:29.424122 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7cf9db4745-22mzl_8f08ca15-124c-476c-b9e1-1002aa7edfd7/prometheus-operator-admission-webhook/0.log" Jan 28 19:44:29 crc kubenswrapper[4767]: I0128 19:44:29.475762 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_obo-prometheus-operator-admission-webhook-7cf9db4745-vd9z2_c2ee930f-1338-483a-aa28-eaecde4404cb/prometheus-operator-admission-webhook/0.log" Jan 28 19:44:29 crc kubenswrapper[4767]: I0128 19:44:29.556149 4767 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openshift-operators_observability-operator-59bdc8b94-2kxrn_e9a7fad9-cfcc-431b-a8ac-54bbf94fb5cf/operator/0.log" Jan 28 19:44:29 crc kubenswrapper[4767]: I0128 19:44:29.631641 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-operators_perses-operator-5bf474d74f-pkkhl_3fad5c65-5d70-4fb5-8f86-3a8cfb1d9f97/perses-operator/0.log" Jan 28 19:44:35 crc kubenswrapper[4767]: I0128 19:44:35.795561 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:44:35 crc kubenswrapper[4767]: E0128 19:44:35.796421 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.269621 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-rh7j5"] Jan 28 19:44:39 crc kubenswrapper[4767]: E0128 19:44:39.270715 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" containerName="extract-utilities" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.270735 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" containerName="extract-utilities" Jan 28 19:44:39 crc kubenswrapper[4767]: E0128 19:44:39.270774 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" containerName="registry-server" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.270782 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" containerName="registry-server" Jan 28 19:44:39 crc kubenswrapper[4767]: E0128 19:44:39.270791 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" containerName="extract-content" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.270800 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" containerName="extract-content" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.271051 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="cb6f1e52-d0ea-4752-b4d4-ca4f8465a8c2" containerName="registry-server" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.273102 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.283195 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rh7j5"] Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.368575 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e97d5112-b93c-4a3c-a23d-10a1e141e85e-catalog-content\") pod \"redhat-operators-rh7j5\" (UID: \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\") " pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.368764 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xklj\" (UniqueName: \"kubernetes.io/projected/e97d5112-b93c-4a3c-a23d-10a1e141e85e-kube-api-access-4xklj\") pod \"redhat-operators-rh7j5\" (UID: \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\") " pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.369128 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e97d5112-b93c-4a3c-a23d-10a1e141e85e-utilities\") pod \"redhat-operators-rh7j5\" (UID: \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\") " pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.471496 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e97d5112-b93c-4a3c-a23d-10a1e141e85e-utilities\") pod \"redhat-operators-rh7j5\" (UID: \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\") " pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.471616 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e97d5112-b93c-4a3c-a23d-10a1e141e85e-catalog-content\") pod \"redhat-operators-rh7j5\" (UID: \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\") " pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.471685 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xklj\" (UniqueName: \"kubernetes.io/projected/e97d5112-b93c-4a3c-a23d-10a1e141e85e-kube-api-access-4xklj\") pod \"redhat-operators-rh7j5\" (UID: \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\") " pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.472369 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e97d5112-b93c-4a3c-a23d-10a1e141e85e-utilities\") pod \"redhat-operators-rh7j5\" (UID: \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\") " pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.472383 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e97d5112-b93c-4a3c-a23d-10a1e141e85e-catalog-content\") pod \"redhat-operators-rh7j5\" (UID: \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\") " pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.505623 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-4xklj\" (UniqueName: \"kubernetes.io/projected/e97d5112-b93c-4a3c-a23d-10a1e141e85e-kube-api-access-4xklj\") pod \"redhat-operators-rh7j5\" (UID: \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\") " pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:39 crc kubenswrapper[4767]: I0128 19:44:39.604202 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:40 crc kubenswrapper[4767]: I0128 19:44:40.157311 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-rh7j5"] Jan 28 19:44:40 crc kubenswrapper[4767]: I0128 19:44:40.511387 4767 generic.go:334] "Generic (PLEG): container finished" podID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerID="9e6becc27e2709164f196bf8ecd1d6d88d1a041c632e1a6bbaadd842e8d3c4cd" exitCode=0 Jan 28 19:44:40 crc kubenswrapper[4767]: I0128 19:44:40.511631 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh7j5" event={"ID":"e97d5112-b93c-4a3c-a23d-10a1e141e85e","Type":"ContainerDied","Data":"9e6becc27e2709164f196bf8ecd1d6d88d1a041c632e1a6bbaadd842e8d3c4cd"} Jan 28 19:44:40 crc kubenswrapper[4767]: I0128 19:44:40.511655 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh7j5" event={"ID":"e97d5112-b93c-4a3c-a23d-10a1e141e85e","Type":"ContainerStarted","Data":"df6e2de71f6b3f3bc44102dd8345b7b11a90993c52038c04ab538b0f357a0174"} Jan 28 19:44:41 crc kubenswrapper[4767]: I0128 19:44:41.552172 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh7j5" event={"ID":"e97d5112-b93c-4a3c-a23d-10a1e141e85e","Type":"ContainerStarted","Data":"91fea2673e4dbcb757e656772da7d402b9f01bb919010a0e8d59defce9c63601"} Jan 28 19:44:42 crc kubenswrapper[4767]: E0128 19:44:42.448848 4767 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.110:39460->38.102.83.110:43805: write tcp 38.102.83.110:39460->38.102.83.110:43805: write: broken pipe Jan 28 19:44:47 crc kubenswrapper[4767]: I0128 19:44:47.609928 4767 generic.go:334] "Generic (PLEG): container finished" podID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerID="91fea2673e4dbcb757e656772da7d402b9f01bb919010a0e8d59defce9c63601" exitCode=0 Jan 28 19:44:47 crc kubenswrapper[4767]: I0128 19:44:47.609983 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh7j5" event={"ID":"e97d5112-b93c-4a3c-a23d-10a1e141e85e","Type":"ContainerDied","Data":"91fea2673e4dbcb757e656772da7d402b9f01bb919010a0e8d59defce9c63601"} Jan 28 19:44:48 crc kubenswrapper[4767]: I0128 19:44:48.624168 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh7j5" event={"ID":"e97d5112-b93c-4a3c-a23d-10a1e141e85e","Type":"ContainerStarted","Data":"ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7"} Jan 28 19:44:48 crc kubenswrapper[4767]: I0128 19:44:48.654131 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-rh7j5" podStartSLOduration=2.081925272 podStartE2EDuration="9.654112129s" podCreationTimestamp="2026-01-28 19:44:39 +0000 UTC" firstStartedPulling="2026-01-28 19:44:40.515636552 +0000 UTC m=+4486.479819426" lastFinishedPulling="2026-01-28 19:44:48.087823419 +0000 UTC m=+4494.052006283" observedRunningTime="2026-01-28 19:44:48.648750751 +0000 UTC m=+4494.612933645" 
watchObservedRunningTime="2026-01-28 19:44:48.654112129 +0000 UTC m=+4494.618295003" Jan 28 19:44:49 crc kubenswrapper[4767]: I0128 19:44:49.605725 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:49 crc kubenswrapper[4767]: I0128 19:44:49.606112 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:44:50 crc kubenswrapper[4767]: I0128 19:44:50.661489 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rh7j5" podUID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerName="registry-server" probeResult="failure" output=< Jan 28 19:44:50 crc kubenswrapper[4767]: timeout: failed to connect service ":50051" within 1s Jan 28 19:44:50 crc kubenswrapper[4767]: > Jan 28 19:44:50 crc kubenswrapper[4767]: I0128 19:44:50.796618 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:44:50 crc kubenswrapper[4767]: E0128 19:44:50.796972 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.204934 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v"] Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.208936 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.211682 4767 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.211863 4767 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.217130 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v"] Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.309734 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e821101f-7c9e-47df-ad3e-b64153d3ca92-config-volume\") pod \"collect-profiles-29493825-lxw7v\" (UID: \"e821101f-7c9e-47df-ad3e-b64153d3ca92\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.309828 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kl4pj\" (UniqueName: \"kubernetes.io/projected/e821101f-7c9e-47df-ad3e-b64153d3ca92-kube-api-access-kl4pj\") pod \"collect-profiles-29493825-lxw7v\" (UID: \"e821101f-7c9e-47df-ad3e-b64153d3ca92\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.310315 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e821101f-7c9e-47df-ad3e-b64153d3ca92-secret-volume\") pod \"collect-profiles-29493825-lxw7v\" (UID: \"e821101f-7c9e-47df-ad3e-b64153d3ca92\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.412527 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e821101f-7c9e-47df-ad3e-b64153d3ca92-secret-volume\") pod \"collect-profiles-29493825-lxw7v\" (UID: \"e821101f-7c9e-47df-ad3e-b64153d3ca92\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.412635 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e821101f-7c9e-47df-ad3e-b64153d3ca92-config-volume\") pod \"collect-profiles-29493825-lxw7v\" (UID: \"e821101f-7c9e-47df-ad3e-b64153d3ca92\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.412676 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kl4pj\" (UniqueName: \"kubernetes.io/projected/e821101f-7c9e-47df-ad3e-b64153d3ca92-kube-api-access-kl4pj\") pod \"collect-profiles-29493825-lxw7v\" (UID: \"e821101f-7c9e-47df-ad3e-b64153d3ca92\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.413959 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e821101f-7c9e-47df-ad3e-b64153d3ca92-config-volume\") pod 
\"collect-profiles-29493825-lxw7v\" (UID: \"e821101f-7c9e-47df-ad3e-b64153d3ca92\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.429398 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e821101f-7c9e-47df-ad3e-b64153d3ca92-secret-volume\") pod \"collect-profiles-29493825-lxw7v\" (UID: \"e821101f-7c9e-47df-ad3e-b64153d3ca92\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.432550 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kl4pj\" (UniqueName: \"kubernetes.io/projected/e821101f-7c9e-47df-ad3e-b64153d3ca92-kube-api-access-kl4pj\") pod \"collect-profiles-29493825-lxw7v\" (UID: \"e821101f-7c9e-47df-ad3e-b64153d3ca92\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.533881 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:00 crc kubenswrapper[4767]: I0128 19:45:00.659012 4767 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-rh7j5" podUID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerName="registry-server" probeResult="failure" output=< Jan 28 19:45:00 crc kubenswrapper[4767]: timeout: failed to connect service ":50051" within 1s Jan 28 19:45:00 crc kubenswrapper[4767]: > Jan 28 19:45:01 crc kubenswrapper[4767]: I0128 19:45:01.007540 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v"] Jan 28 19:45:01 crc kubenswrapper[4767]: I0128 19:45:01.756063 4767 generic.go:334] "Generic (PLEG): container finished" podID="e821101f-7c9e-47df-ad3e-b64153d3ca92" containerID="d7a859967c82ae887994830471349f76cfa4ad0a9b9a49ac0f444e9e02819aaf" exitCode=0 Jan 28 19:45:01 crc kubenswrapper[4767]: I0128 19:45:01.756435 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" event={"ID":"e821101f-7c9e-47df-ad3e-b64153d3ca92","Type":"ContainerDied","Data":"d7a859967c82ae887994830471349f76cfa4ad0a9b9a49ac0f444e9e02819aaf"} Jan 28 19:45:01 crc kubenswrapper[4767]: I0128 19:45:01.756483 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" event={"ID":"e821101f-7c9e-47df-ad3e-b64153d3ca92","Type":"ContainerStarted","Data":"5ac37872e520dc811debaacc464dbc44d6c43a3c6a0deeb213e1a61e78be64f7"} Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.151680 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.282980 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kl4pj\" (UniqueName: \"kubernetes.io/projected/e821101f-7c9e-47df-ad3e-b64153d3ca92-kube-api-access-kl4pj\") pod \"e821101f-7c9e-47df-ad3e-b64153d3ca92\" (UID: \"e821101f-7c9e-47df-ad3e-b64153d3ca92\") " Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.283098 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e821101f-7c9e-47df-ad3e-b64153d3ca92-config-volume\") pod \"e821101f-7c9e-47df-ad3e-b64153d3ca92\" (UID: \"e821101f-7c9e-47df-ad3e-b64153d3ca92\") " Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.283186 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e821101f-7c9e-47df-ad3e-b64153d3ca92-secret-volume\") pod \"e821101f-7c9e-47df-ad3e-b64153d3ca92\" (UID: \"e821101f-7c9e-47df-ad3e-b64153d3ca92\") " Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.283768 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e821101f-7c9e-47df-ad3e-b64153d3ca92-config-volume" (OuterVolumeSpecName: "config-volume") pod "e821101f-7c9e-47df-ad3e-b64153d3ca92" (UID: "e821101f-7c9e-47df-ad3e-b64153d3ca92"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.288708 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e821101f-7c9e-47df-ad3e-b64153d3ca92-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e821101f-7c9e-47df-ad3e-b64153d3ca92" (UID: "e821101f-7c9e-47df-ad3e-b64153d3ca92"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.289107 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e821101f-7c9e-47df-ad3e-b64153d3ca92-kube-api-access-kl4pj" (OuterVolumeSpecName: "kube-api-access-kl4pj") pod "e821101f-7c9e-47df-ad3e-b64153d3ca92" (UID: "e821101f-7c9e-47df-ad3e-b64153d3ca92"). InnerVolumeSpecName "kube-api-access-kl4pj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.385526 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kl4pj\" (UniqueName: \"kubernetes.io/projected/e821101f-7c9e-47df-ad3e-b64153d3ca92-kube-api-access-kl4pj\") on node \"crc\" DevicePath \"\"" Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.385557 4767 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e821101f-7c9e-47df-ad3e-b64153d3ca92-config-volume\") on node \"crc\" DevicePath \"\"" Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.385569 4767 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e821101f-7c9e-47df-ad3e-b64153d3ca92-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.779294 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" event={"ID":"e821101f-7c9e-47df-ad3e-b64153d3ca92","Type":"ContainerDied","Data":"5ac37872e520dc811debaacc464dbc44d6c43a3c6a0deeb213e1a61e78be64f7"} Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.779350 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29493825-lxw7v" Jan 28 19:45:03 crc kubenswrapper[4767]: I0128 19:45:03.779360 4767 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ac37872e520dc811debaacc464dbc44d6c43a3c6a0deeb213e1a61e78be64f7" Jan 28 19:45:04 crc kubenswrapper[4767]: I0128 19:45:04.229896 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd"] Jan 28 19:45:04 crc kubenswrapper[4767]: I0128 19:45:04.239375 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29493780-qlmdd"] Jan 28 19:45:04 crc kubenswrapper[4767]: I0128 19:45:04.795949 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:45:04 crc kubenswrapper[4767]: E0128 19:45:04.796382 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:45:04 crc kubenswrapper[4767]: I0128 19:45:04.809583 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5057532f-71be-41b1-8c80-248b78eb8d20" path="/var/lib/kubelet/pods/5057532f-71be-41b1-8c80-248b78eb8d20/volumes" Jan 28 19:45:09 crc kubenswrapper[4767]: I0128 19:45:09.663028 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:45:09 crc kubenswrapper[4767]: I0128 19:45:09.715032 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:45:10 crc kubenswrapper[4767]: I0128 19:45:10.482102 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rh7j5"] Jan 28 19:45:10 crc kubenswrapper[4767]: I0128 
19:45:10.871528 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-rh7j5" podUID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerName="registry-server" containerID="cri-o://ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7" gracePeriod=2 Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.859263 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.884279 4767 generic.go:334] "Generic (PLEG): container finished" podID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerID="ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7" exitCode=0 Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.884330 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh7j5" event={"ID":"e97d5112-b93c-4a3c-a23d-10a1e141e85e","Type":"ContainerDied","Data":"ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7"} Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.884361 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-rh7j5" event={"ID":"e97d5112-b93c-4a3c-a23d-10a1e141e85e","Type":"ContainerDied","Data":"df6e2de71f6b3f3bc44102dd8345b7b11a90993c52038c04ab538b0f357a0174"} Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.884380 4767 scope.go:117] "RemoveContainer" containerID="ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7" Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.884517 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-rh7j5" Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.890443 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e97d5112-b93c-4a3c-a23d-10a1e141e85e-utilities\") pod \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\" (UID: \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\") " Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.890533 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4xklj\" (UniqueName: \"kubernetes.io/projected/e97d5112-b93c-4a3c-a23d-10a1e141e85e-kube-api-access-4xklj\") pod \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\" (UID: \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\") " Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.890580 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e97d5112-b93c-4a3c-a23d-10a1e141e85e-catalog-content\") pod \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\" (UID: \"e97d5112-b93c-4a3c-a23d-10a1e141e85e\") " Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.891796 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e97d5112-b93c-4a3c-a23d-10a1e141e85e-utilities" (OuterVolumeSpecName: "utilities") pod "e97d5112-b93c-4a3c-a23d-10a1e141e85e" (UID: "e97d5112-b93c-4a3c-a23d-10a1e141e85e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.896615 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e97d5112-b93c-4a3c-a23d-10a1e141e85e-kube-api-access-4xklj" (OuterVolumeSpecName: "kube-api-access-4xklj") pod "e97d5112-b93c-4a3c-a23d-10a1e141e85e" (UID: "e97d5112-b93c-4a3c-a23d-10a1e141e85e"). InnerVolumeSpecName "kube-api-access-4xklj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.921972 4767 scope.go:117] "RemoveContainer" containerID="91fea2673e4dbcb757e656772da7d402b9f01bb919010a0e8d59defce9c63601" Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.961413 4767 scope.go:117] "RemoveContainer" containerID="9e6becc27e2709164f196bf8ecd1d6d88d1a041c632e1a6bbaadd842e8d3c4cd" Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.993430 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4xklj\" (UniqueName: \"kubernetes.io/projected/e97d5112-b93c-4a3c-a23d-10a1e141e85e-kube-api-access-4xklj\") on node \"crc\" DevicePath \"\"" Jan 28 19:45:11 crc kubenswrapper[4767]: I0128 19:45:11.993464 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e97d5112-b93c-4a3c-a23d-10a1e141e85e-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:45:12 crc kubenswrapper[4767]: I0128 19:45:12.005378 4767 scope.go:117] "RemoveContainer" containerID="ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7" Jan 28 19:45:12 crc kubenswrapper[4767]: E0128 19:45:12.006917 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7\": container with ID starting with ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7 not found: ID does not exist" containerID="ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7" Jan 28 19:45:12 crc kubenswrapper[4767]: I0128 19:45:12.006974 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7"} err="failed to get container status \"ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7\": rpc error: code = NotFound desc = could not find container \"ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7\": container with ID starting with ad7241421155ff566acb291425e3942221d5f1a14cfc17a199a795203077b7d7 not found: ID does not exist" Jan 28 19:45:12 crc kubenswrapper[4767]: I0128 19:45:12.007008 4767 scope.go:117] "RemoveContainer" containerID="91fea2673e4dbcb757e656772da7d402b9f01bb919010a0e8d59defce9c63601" Jan 28 19:45:12 crc kubenswrapper[4767]: E0128 19:45:12.009102 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"91fea2673e4dbcb757e656772da7d402b9f01bb919010a0e8d59defce9c63601\": container with ID starting with 91fea2673e4dbcb757e656772da7d402b9f01bb919010a0e8d59defce9c63601 not found: ID does not exist" containerID="91fea2673e4dbcb757e656772da7d402b9f01bb919010a0e8d59defce9c63601" Jan 28 19:45:12 crc kubenswrapper[4767]: I0128 19:45:12.009148 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"91fea2673e4dbcb757e656772da7d402b9f01bb919010a0e8d59defce9c63601"} err="failed to get 
container status \"91fea2673e4dbcb757e656772da7d402b9f01bb919010a0e8d59defce9c63601\": rpc error: code = NotFound desc = could not find container \"91fea2673e4dbcb757e656772da7d402b9f01bb919010a0e8d59defce9c63601\": container with ID starting with 91fea2673e4dbcb757e656772da7d402b9f01bb919010a0e8d59defce9c63601 not found: ID does not exist" Jan 28 19:45:12 crc kubenswrapper[4767]: I0128 19:45:12.009182 4767 scope.go:117] "RemoveContainer" containerID="9e6becc27e2709164f196bf8ecd1d6d88d1a041c632e1a6bbaadd842e8d3c4cd" Jan 28 19:45:12 crc kubenswrapper[4767]: E0128 19:45:12.009596 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e6becc27e2709164f196bf8ecd1d6d88d1a041c632e1a6bbaadd842e8d3c4cd\": container with ID starting with 9e6becc27e2709164f196bf8ecd1d6d88d1a041c632e1a6bbaadd842e8d3c4cd not found: ID does not exist" containerID="9e6becc27e2709164f196bf8ecd1d6d88d1a041c632e1a6bbaadd842e8d3c4cd" Jan 28 19:45:12 crc kubenswrapper[4767]: I0128 19:45:12.009631 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e6becc27e2709164f196bf8ecd1d6d88d1a041c632e1a6bbaadd842e8d3c4cd"} err="failed to get container status \"9e6becc27e2709164f196bf8ecd1d6d88d1a041c632e1a6bbaadd842e8d3c4cd\": rpc error: code = NotFound desc = could not find container \"9e6becc27e2709164f196bf8ecd1d6d88d1a041c632e1a6bbaadd842e8d3c4cd\": container with ID starting with 9e6becc27e2709164f196bf8ecd1d6d88d1a041c632e1a6bbaadd842e8d3c4cd not found: ID does not exist" Jan 28 19:45:12 crc kubenswrapper[4767]: I0128 19:45:12.058141 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e97d5112-b93c-4a3c-a23d-10a1e141e85e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e97d5112-b93c-4a3c-a23d-10a1e141e85e" (UID: "e97d5112-b93c-4a3c-a23d-10a1e141e85e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:45:12 crc kubenswrapper[4767]: I0128 19:45:12.096069 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e97d5112-b93c-4a3c-a23d-10a1e141e85e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:45:12 crc kubenswrapper[4767]: I0128 19:45:12.227524 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-rh7j5"] Jan 28 19:45:12 crc kubenswrapper[4767]: I0128 19:45:12.238565 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-rh7j5"] Jan 28 19:45:12 crc kubenswrapper[4767]: I0128 19:45:12.819680 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" path="/var/lib/kubelet/pods/e97d5112-b93c-4a3c-a23d-10a1e141e85e/volumes" Jan 28 19:45:18 crc kubenswrapper[4767]: I0128 19:45:18.796880 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:45:18 crc kubenswrapper[4767]: E0128 19:45:18.797535 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:45:32 crc kubenswrapper[4767]: I0128 19:45:32.796065 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:45:32 crc kubenswrapper[4767]: E0128 19:45:32.796838 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:45:46 crc kubenswrapper[4767]: I0128 19:45:46.795936 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:45:46 crc kubenswrapper[4767]: E0128 19:45:46.796763 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:45:54 crc kubenswrapper[4767]: I0128 19:45:54.625385 4767 scope.go:117] "RemoveContainer" containerID="179fda6c939919b807c514737b104ad076da709e228701747f22acd59d26b53e" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.003079 4767 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-9xv9m"] Jan 28 19:45:55 crc kubenswrapper[4767]: E0128 19:45:55.003593 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerName="extract-utilities" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.003618 
4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerName="extract-utilities" Jan 28 19:45:55 crc kubenswrapper[4767]: E0128 19:45:55.003659 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerName="extract-content" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.003669 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerName="extract-content" Jan 28 19:45:55 crc kubenswrapper[4767]: E0128 19:45:55.003683 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerName="registry-server" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.003691 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerName="registry-server" Jan 28 19:45:55 crc kubenswrapper[4767]: E0128 19:45:55.003711 4767 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e821101f-7c9e-47df-ad3e-b64153d3ca92" containerName="collect-profiles" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.003720 4767 state_mem.go:107] "Deleted CPUSet assignment" podUID="e821101f-7c9e-47df-ad3e-b64153d3ca92" containerName="collect-profiles" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.003947 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e821101f-7c9e-47df-ad3e-b64153d3ca92" containerName="collect-profiles" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.003974 4767 memory_manager.go:354] "RemoveStaleState removing state" podUID="e97d5112-b93c-4a3c-a23d-10a1e141e85e" containerName="registry-server" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.005790 4767 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.038428 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9xv9m"] Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.117871 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b74b29e1-1de9-45b8-8531-2b4459a25a71-utilities\") pod \"certified-operators-9xv9m\" (UID: \"b74b29e1-1de9-45b8-8531-2b4459a25a71\") " pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.118240 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58p89\" (UniqueName: \"kubernetes.io/projected/b74b29e1-1de9-45b8-8531-2b4459a25a71-kube-api-access-58p89\") pod \"certified-operators-9xv9m\" (UID: \"b74b29e1-1de9-45b8-8531-2b4459a25a71\") " pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.118271 4767 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b74b29e1-1de9-45b8-8531-2b4459a25a71-catalog-content\") pod \"certified-operators-9xv9m\" (UID: \"b74b29e1-1de9-45b8-8531-2b4459a25a71\") " pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.220629 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b74b29e1-1de9-45b8-8531-2b4459a25a71-utilities\") pod \"certified-operators-9xv9m\" (UID: \"b74b29e1-1de9-45b8-8531-2b4459a25a71\") " pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.220732 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58p89\" (UniqueName: \"kubernetes.io/projected/b74b29e1-1de9-45b8-8531-2b4459a25a71-kube-api-access-58p89\") pod \"certified-operators-9xv9m\" (UID: \"b74b29e1-1de9-45b8-8531-2b4459a25a71\") " pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.220762 4767 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b74b29e1-1de9-45b8-8531-2b4459a25a71-catalog-content\") pod \"certified-operators-9xv9m\" (UID: \"b74b29e1-1de9-45b8-8531-2b4459a25a71\") " pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.221342 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b74b29e1-1de9-45b8-8531-2b4459a25a71-utilities\") pod \"certified-operators-9xv9m\" (UID: \"b74b29e1-1de9-45b8-8531-2b4459a25a71\") " pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.221412 4767 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b74b29e1-1de9-45b8-8531-2b4459a25a71-catalog-content\") pod \"certified-operators-9xv9m\" (UID: \"b74b29e1-1de9-45b8-8531-2b4459a25a71\") " pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.238486 4767 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-58p89\" (UniqueName: \"kubernetes.io/projected/b74b29e1-1de9-45b8-8531-2b4459a25a71-kube-api-access-58p89\") pod \"certified-operators-9xv9m\" (UID: \"b74b29e1-1de9-45b8-8531-2b4459a25a71\") " pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.327509 4767 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:45:55 crc kubenswrapper[4767]: I0128 19:45:55.895162 4767 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-9xv9m"] Jan 28 19:45:55 crc kubenswrapper[4767]: W0128 19:45:55.897429 4767 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb74b29e1_1de9_45b8_8531_2b4459a25a71.slice/crio-2daf50a5cb5c486fda34b0b1fe5a5a967e04f1b32706eb58726fbf18dbb38e7a WatchSource:0}: Error finding container 2daf50a5cb5c486fda34b0b1fe5a5a967e04f1b32706eb58726fbf18dbb38e7a: Status 404 returned error can't find the container with id 2daf50a5cb5c486fda34b0b1fe5a5a967e04f1b32706eb58726fbf18dbb38e7a Jan 28 19:45:56 crc kubenswrapper[4767]: I0128 19:45:56.328903 4767 generic.go:334] "Generic (PLEG): container finished" podID="b74b29e1-1de9-45b8-8531-2b4459a25a71" containerID="5c0a462d52803afd4220e3b4c38511858a46eaaedab9fc07d954063ff5ac0e61" exitCode=0 Jan 28 19:45:56 crc kubenswrapper[4767]: I0128 19:45:56.328978 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xv9m" event={"ID":"b74b29e1-1de9-45b8-8531-2b4459a25a71","Type":"ContainerDied","Data":"5c0a462d52803afd4220e3b4c38511858a46eaaedab9fc07d954063ff5ac0e61"} Jan 28 19:45:56 crc kubenswrapper[4767]: I0128 19:45:56.329195 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xv9m" event={"ID":"b74b29e1-1de9-45b8-8531-2b4459a25a71","Type":"ContainerStarted","Data":"2daf50a5cb5c486fda34b0b1fe5a5a967e04f1b32706eb58726fbf18dbb38e7a"} Jan 28 19:45:56 crc kubenswrapper[4767]: I0128 19:45:56.332041 4767 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 28 19:45:57 crc kubenswrapper[4767]: I0128 19:45:57.345262 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xv9m" event={"ID":"b74b29e1-1de9-45b8-8531-2b4459a25a71","Type":"ContainerStarted","Data":"1beeeb0edd14b2afb633d8b038d7534030dd3e06e18830c4901c6860fff55095"} Jan 28 19:45:58 crc kubenswrapper[4767]: I0128 19:45:58.355700 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xv9m" event={"ID":"b74b29e1-1de9-45b8-8531-2b4459a25a71","Type":"ContainerDied","Data":"1beeeb0edd14b2afb633d8b038d7534030dd3e06e18830c4901c6860fff55095"} Jan 28 19:45:58 crc kubenswrapper[4767]: I0128 19:45:58.356471 4767 generic.go:334] "Generic (PLEG): container finished" podID="b74b29e1-1de9-45b8-8531-2b4459a25a71" containerID="1beeeb0edd14b2afb633d8b038d7534030dd3e06e18830c4901c6860fff55095" exitCode=0 Jan 28 19:45:59 crc kubenswrapper[4767]: I0128 19:45:59.368858 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xv9m" event={"ID":"b74b29e1-1de9-45b8-8531-2b4459a25a71","Type":"ContainerStarted","Data":"13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16"} Jan 28 19:45:59 crc kubenswrapper[4767]: I0128 
19:45:59.401755 4767 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-9xv9m" podStartSLOduration=2.948439638 podStartE2EDuration="5.401733583s" podCreationTimestamp="2026-01-28 19:45:54 +0000 UTC" firstStartedPulling="2026-01-28 19:45:56.331534323 +0000 UTC m=+4562.295717217" lastFinishedPulling="2026-01-28 19:45:58.784828288 +0000 UTC m=+4564.749011162" observedRunningTime="2026-01-28 19:45:59.391009674 +0000 UTC m=+4565.355192558" watchObservedRunningTime="2026-01-28 19:45:59.401733583 +0000 UTC m=+4565.365916457" Jan 28 19:45:59 crc kubenswrapper[4767]: I0128 19:45:59.796569 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:45:59 crc kubenswrapper[4767]: E0128 19:45:59.796829 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:46:05 crc kubenswrapper[4767]: I0128 19:46:05.328025 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:46:05 crc kubenswrapper[4767]: I0128 19:46:05.328617 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:46:05 crc kubenswrapper[4767]: I0128 19:46:05.385115 4767 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:46:05 crc kubenswrapper[4767]: I0128 19:46:05.481502 4767 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:46:05 crc kubenswrapper[4767]: I0128 19:46:05.637058 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9xv9m"] Jan 28 19:46:07 crc kubenswrapper[4767]: I0128 19:46:07.448893 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-9xv9m" podUID="b74b29e1-1de9-45b8-8531-2b4459a25a71" containerName="registry-server" containerID="cri-o://13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16" gracePeriod=2 Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.132722 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.210334 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b74b29e1-1de9-45b8-8531-2b4459a25a71-utilities\") pod \"b74b29e1-1de9-45b8-8531-2b4459a25a71\" (UID: \"b74b29e1-1de9-45b8-8531-2b4459a25a71\") " Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.210578 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b74b29e1-1de9-45b8-8531-2b4459a25a71-catalog-content\") pod \"b74b29e1-1de9-45b8-8531-2b4459a25a71\" (UID: \"b74b29e1-1de9-45b8-8531-2b4459a25a71\") " Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.210611 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58p89\" (UniqueName: \"kubernetes.io/projected/b74b29e1-1de9-45b8-8531-2b4459a25a71-kube-api-access-58p89\") pod \"b74b29e1-1de9-45b8-8531-2b4459a25a71\" (UID: \"b74b29e1-1de9-45b8-8531-2b4459a25a71\") " Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.213247 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b74b29e1-1de9-45b8-8531-2b4459a25a71-utilities" (OuterVolumeSpecName: "utilities") pod "b74b29e1-1de9-45b8-8531-2b4459a25a71" (UID: "b74b29e1-1de9-45b8-8531-2b4459a25a71"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.219223 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b74b29e1-1de9-45b8-8531-2b4459a25a71-kube-api-access-58p89" (OuterVolumeSpecName: "kube-api-access-58p89") pod "b74b29e1-1de9-45b8-8531-2b4459a25a71" (UID: "b74b29e1-1de9-45b8-8531-2b4459a25a71"). InnerVolumeSpecName "kube-api-access-58p89". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.290072 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b74b29e1-1de9-45b8-8531-2b4459a25a71-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b74b29e1-1de9-45b8-8531-2b4459a25a71" (UID: "b74b29e1-1de9-45b8-8531-2b4459a25a71"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.313405 4767 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b74b29e1-1de9-45b8-8531-2b4459a25a71-utilities\") on node \"crc\" DevicePath \"\"" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.313465 4767 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b74b29e1-1de9-45b8-8531-2b4459a25a71-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.313513 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58p89\" (UniqueName: \"kubernetes.io/projected/b74b29e1-1de9-45b8-8531-2b4459a25a71-kube-api-access-58p89\") on node \"crc\" DevicePath \"\"" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.460845 4767 generic.go:334] "Generic (PLEG): container finished" podID="b74b29e1-1de9-45b8-8531-2b4459a25a71" containerID="13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16" exitCode=0 Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.460901 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xv9m" event={"ID":"b74b29e1-1de9-45b8-8531-2b4459a25a71","Type":"ContainerDied","Data":"13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16"} Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.460930 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-9xv9m" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.460945 4767 scope.go:117] "RemoveContainer" containerID="13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.460932 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-9xv9m" event={"ID":"b74b29e1-1de9-45b8-8531-2b4459a25a71","Type":"ContainerDied","Data":"2daf50a5cb5c486fda34b0b1fe5a5a967e04f1b32706eb58726fbf18dbb38e7a"} Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.501979 4767 scope.go:117] "RemoveContainer" containerID="1beeeb0edd14b2afb633d8b038d7534030dd3e06e18830c4901c6860fff55095" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.524300 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-9xv9m"] Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.531533 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-9xv9m"] Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.540614 4767 scope.go:117] "RemoveContainer" containerID="5c0a462d52803afd4220e3b4c38511858a46eaaedab9fc07d954063ff5ac0e61" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.578794 4767 scope.go:117] "RemoveContainer" containerID="13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16" Jan 28 19:46:08 crc kubenswrapper[4767]: E0128 19:46:08.581485 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16\": container with ID starting with 13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16 not found: ID does not exist" containerID="13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.581528 
4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16"} err="failed to get container status \"13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16\": rpc error: code = NotFound desc = could not find container \"13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16\": container with ID starting with 13ad0fb7345a8c9f936f733cc7fe1a8096b41de7d8b290400037391e8137db16 not found: ID does not exist" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.581550 4767 scope.go:117] "RemoveContainer" containerID="1beeeb0edd14b2afb633d8b038d7534030dd3e06e18830c4901c6860fff55095" Jan 28 19:46:08 crc kubenswrapper[4767]: E0128 19:46:08.581944 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1beeeb0edd14b2afb633d8b038d7534030dd3e06e18830c4901c6860fff55095\": container with ID starting with 1beeeb0edd14b2afb633d8b038d7534030dd3e06e18830c4901c6860fff55095 not found: ID does not exist" containerID="1beeeb0edd14b2afb633d8b038d7534030dd3e06e18830c4901c6860fff55095" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.581984 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1beeeb0edd14b2afb633d8b038d7534030dd3e06e18830c4901c6860fff55095"} err="failed to get container status \"1beeeb0edd14b2afb633d8b038d7534030dd3e06e18830c4901c6860fff55095\": rpc error: code = NotFound desc = could not find container \"1beeeb0edd14b2afb633d8b038d7534030dd3e06e18830c4901c6860fff55095\": container with ID starting with 1beeeb0edd14b2afb633d8b038d7534030dd3e06e18830c4901c6860fff55095 not found: ID does not exist" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.582014 4767 scope.go:117] "RemoveContainer" containerID="5c0a462d52803afd4220e3b4c38511858a46eaaedab9fc07d954063ff5ac0e61" Jan 28 19:46:08 crc kubenswrapper[4767]: E0128 19:46:08.582432 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c0a462d52803afd4220e3b4c38511858a46eaaedab9fc07d954063ff5ac0e61\": container with ID starting with 5c0a462d52803afd4220e3b4c38511858a46eaaedab9fc07d954063ff5ac0e61 not found: ID does not exist" containerID="5c0a462d52803afd4220e3b4c38511858a46eaaedab9fc07d954063ff5ac0e61" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.582466 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c0a462d52803afd4220e3b4c38511858a46eaaedab9fc07d954063ff5ac0e61"} err="failed to get container status \"5c0a462d52803afd4220e3b4c38511858a46eaaedab9fc07d954063ff5ac0e61\": rpc error: code = NotFound desc = could not find container \"5c0a462d52803afd4220e3b4c38511858a46eaaedab9fc07d954063ff5ac0e61\": container with ID starting with 5c0a462d52803afd4220e3b4c38511858a46eaaedab9fc07d954063ff5ac0e61 not found: ID does not exist" Jan 28 19:46:08 crc kubenswrapper[4767]: I0128 19:46:08.808159 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b74b29e1-1de9-45b8-8531-2b4459a25a71" path="/var/lib/kubelet/pods/b74b29e1-1de9-45b8-8531-2b4459a25a71/volumes" Jan 28 19:46:13 crc kubenswrapper[4767]: I0128 19:46:13.796462 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:46:13 crc kubenswrapper[4767]: E0128 19:46:13.797231 4767 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:46:15 crc kubenswrapper[4767]: I0128 19:46:15.522658 4767 generic.go:334] "Generic (PLEG): container finished" podID="9b3e99fa-e0cd-4241-9baf-39ebb0c5362d" containerID="47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd" exitCode=0 Jan 28 19:46:15 crc kubenswrapper[4767]: I0128 19:46:15.522821 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-6wdzt/must-gather-5mtn7" event={"ID":"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d","Type":"ContainerDied","Data":"47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd"} Jan 28 19:46:15 crc kubenswrapper[4767]: I0128 19:46:15.523746 4767 scope.go:117] "RemoveContainer" containerID="47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd" Jan 28 19:46:16 crc kubenswrapper[4767]: I0128 19:46:16.143158 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6wdzt_must-gather-5mtn7_9b3e99fa-e0cd-4241-9baf-39ebb0c5362d/gather/0.log" Jan 28 19:46:23 crc kubenswrapper[4767]: I0128 19:46:23.821509 4767 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-6wdzt/must-gather-5mtn7"] Jan 28 19:46:23 crc kubenswrapper[4767]: I0128 19:46:23.822275 4767 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-6wdzt/must-gather-5mtn7" podUID="9b3e99fa-e0cd-4241-9baf-39ebb0c5362d" containerName="copy" containerID="cri-o://b344962d9b555efb485357e2961f5bf20be8193df51b1e4958e13ace5611cee6" gracePeriod=2 Jan 28 19:46:23 crc kubenswrapper[4767]: I0128 19:46:23.834693 4767 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-6wdzt/must-gather-5mtn7"] Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.404124 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6wdzt_must-gather-5mtn7_9b3e99fa-e0cd-4241-9baf-39ebb0c5362d/copy/0.log" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.405589 4767 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-6wdzt/must-gather-5mtn7" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.464892 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9qc9\" (UniqueName: \"kubernetes.io/projected/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d-kube-api-access-t9qc9\") pod \"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d\" (UID: \"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d\") " Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.465170 4767 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d-must-gather-output\") pod \"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d\" (UID: \"9b3e99fa-e0cd-4241-9baf-39ebb0c5362d\") " Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.472432 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d-kube-api-access-t9qc9" (OuterVolumeSpecName: "kube-api-access-t9qc9") pod "9b3e99fa-e0cd-4241-9baf-39ebb0c5362d" (UID: "9b3e99fa-e0cd-4241-9baf-39ebb0c5362d"). InnerVolumeSpecName "kube-api-access-t9qc9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.567896 4767 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9qc9\" (UniqueName: \"kubernetes.io/projected/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d-kube-api-access-t9qc9\") on node \"crc\" DevicePath \"\"" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.622634 4767 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-6wdzt_must-gather-5mtn7_9b3e99fa-e0cd-4241-9baf-39ebb0c5362d/copy/0.log" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.623311 4767 generic.go:334] "Generic (PLEG): container finished" podID="9b3e99fa-e0cd-4241-9baf-39ebb0c5362d" containerID="b344962d9b555efb485357e2961f5bf20be8193df51b1e4958e13ace5611cee6" exitCode=143 Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.623368 4767 scope.go:117] "RemoveContainer" containerID="b344962d9b555efb485357e2961f5bf20be8193df51b1e4958e13ace5611cee6" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.623411 4767 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-6wdzt/must-gather-5mtn7" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.650840 4767 scope.go:117] "RemoveContainer" containerID="47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.659987 4767 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "9b3e99fa-e0cd-4241-9baf-39ebb0c5362d" (UID: "9b3e99fa-e0cd-4241-9baf-39ebb0c5362d"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.669908 4767 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.783304 4767 scope.go:117] "RemoveContainer" containerID="b344962d9b555efb485357e2961f5bf20be8193df51b1e4958e13ace5611cee6" Jan 28 19:46:24 crc kubenswrapper[4767]: E0128 19:46:24.783865 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b344962d9b555efb485357e2961f5bf20be8193df51b1e4958e13ace5611cee6\": container with ID starting with b344962d9b555efb485357e2961f5bf20be8193df51b1e4958e13ace5611cee6 not found: ID does not exist" containerID="b344962d9b555efb485357e2961f5bf20be8193df51b1e4958e13ace5611cee6" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.783898 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b344962d9b555efb485357e2961f5bf20be8193df51b1e4958e13ace5611cee6"} err="failed to get container status \"b344962d9b555efb485357e2961f5bf20be8193df51b1e4958e13ace5611cee6\": rpc error: code = NotFound desc = could not find container \"b344962d9b555efb485357e2961f5bf20be8193df51b1e4958e13ace5611cee6\": container with ID starting with b344962d9b555efb485357e2961f5bf20be8193df51b1e4958e13ace5611cee6 not found: ID does not exist" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.783919 4767 scope.go:117] "RemoveContainer" containerID="47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd" Jan 28 19:46:24 crc kubenswrapper[4767]: E0128 19:46:24.784152 4767 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd\": container with ID starting with 47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd not found: ID does not exist" containerID="47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.784169 4767 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd"} err="failed to get container status \"47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd\": rpc error: code = NotFound desc = could not find container \"47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd\": container with ID starting with 47963a2cf0e1b7443aee49f78c1d2fd0ecc069224903c8a91f11430e5b17f6dd not found: ID does not exist" Jan 28 19:46:24 crc kubenswrapper[4767]: I0128 19:46:24.809284 4767 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b3e99fa-e0cd-4241-9baf-39ebb0c5362d" path="/var/lib/kubelet/pods/9b3e99fa-e0cd-4241-9baf-39ebb0c5362d/volumes" Jan 28 19:46:26 crc kubenswrapper[4767]: I0128 19:46:26.797691 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:46:26 crc kubenswrapper[4767]: E0128 19:46:26.798249 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:46:38 crc kubenswrapper[4767]: I0128 19:46:38.795832 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:46:38 crc kubenswrapper[4767]: E0128 19:46:38.797141 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:46:50 crc kubenswrapper[4767]: I0128 19:46:50.796069 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:46:50 crc kubenswrapper[4767]: E0128 19:46:50.797083 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:47:04 crc kubenswrapper[4767]: I0128 19:47:04.803047 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:47:04 crc kubenswrapper[4767]: E0128 19:47:04.804535 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:47:19 crc kubenswrapper[4767]: I0128 19:47:19.795360 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:47:19 crc kubenswrapper[4767]: E0128 19:47:19.796158 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:47:32 crc kubenswrapper[4767]: I0128 19:47:32.795974 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:47:32 crc kubenswrapper[4767]: E0128 19:47:32.796899 4767 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-skvzp_openshift-machine-config-operator(f729b63a-09d0-4095-add6-3e40fbd43e1c)\"" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" 
podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" Jan 28 19:47:45 crc kubenswrapper[4767]: I0128 19:47:45.795644 4767 scope.go:117] "RemoveContainer" containerID="8a61bc29bad805338d10c5dfa1183a73094c6ce8adbf44f6c73f28f0d63664f3" Jan 28 19:47:46 crc kubenswrapper[4767]: I0128 19:47:46.409774 4767 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" event={"ID":"f729b63a-09d0-4095-add6-3e40fbd43e1c","Type":"ContainerStarted","Data":"aac70a655f142ce028d55251a66e1c73f236cffec15a33ad3fc1f75d6545b6df"} Jan 28 19:50:15 crc kubenswrapper[4767]: I0128 19:50:15.455270 4767 patch_prober.go:28] interesting pod/machine-config-daemon-skvzp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 28 19:50:15 crc kubenswrapper[4767]: I0128 19:50:15.455880 4767 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-skvzp" podUID="f729b63a-09d0-4095-add6-3e40fbd43e1c" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515136464213024452 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015136464214017370 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015136452302016506 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015136452303015457 5ustar corecore